Compare commits

No commits in common. 'c9' and 'c8-stream-5' have entirely different histories.

.gitignore

@ -1,2 +1,2 @@
-SOURCES/redis-6.2.7.tar.gz
-SOURCES/redis-doc-3fdb6df.tar.gz
+SOURCES/redis-5.0.3.tar.gz
+SOURCES/redis-doc-a1e79fc.tar.gz

@ -1,2 +1,2 @@
-b01ef3f117c9815dea41bf2609e489a03c3a5ab1 SOURCES/redis-6.2.7.tar.gz
-1af2e3e6d240e8b87d21bbd4d81fef78e9af7090 SOURCES/redis-doc-3fdb6df.tar.gz
+a43c24ea6365482323b78e21752d610756efcc39 SOURCES/redis-5.0.3.tar.gz
+f2d0dc6e21bf416d4ff32868a2f0fee415391057 SOURCES/redis-doc-a1e79fc.tar.gz

@ -1,18 +1,18 @@
-From d68953c34d4d6987883ddf6158c3c69e7500667f Mon Sep 17 00:00:00 2001
+From c7958ad1c0d615b81276ec2d4dbc1bf6a67dcc4d Mon Sep 17 00:00:00 2001
From: Remi Collet <fedora@famillecollet.com>
Date: Thu, 8 Sep 2016 14:51:15 +0200
-Subject: [PATCH 1/3] 1st man pageis for - redis-cli - redis-benchmark -
+Subject: [PATCH 1/2] 1st man pageis for - redis-cli - redis-benchmark -
redis-check-aof - redis-check-rdb - redis-server - redis.conf
as redis-sentinel is a symlink to redis-server, same page can be used (also symlinked)
redis.conf can also be used for sentinel.conf
---
-man/man1/redis-benchmark.1 | 132 ++++++++++++++++++++++++++++
-man/man1/redis-check-aof.1 | 60 +++++++++++++
-man/man1/redis-check-rdb.1 | 53 ++++++++++++
-man/man1/redis-cli.1 | 171 +++++++++++++++++++++++++++++++++++++
-man/man1/redis-server.1 | 117 +++++++++++++++++++++++++
-man/man5/redis.conf.5 | 57 +++++++++++++
+man/man1/redis-benchmark.1 | 132 ++++++++++++++++++++++++++++++++++
+man/man1/redis-check-aof.1 | 60 ++++++++++++++++
+man/man1/redis-check-rdb.1 | 53 ++++++++++++++
+man/man1/redis-cli.1 | 171 +++++++++++++++++++++++++++++++++++++++++++++
+man/man1/redis-server.1 | 117 +++++++++++++++++++++++++++++++
+man/man5/redis.conf.5 | 57 +++++++++++++++
6 files changed, 590 insertions(+)
create mode 100644 man/man1/redis-benchmark.1
create mode 100644 man/man1/redis-check-aof.1
@ -648,5 +648,5 @@ index 0000000..1e0c9c9
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
-2.24.1
+2.13.5

@ -0,0 +1,26 @@
From 992c773e70462a6fbe1536e18e673c9ab55d5901 Mon Sep 17 00:00:00 2001
From: Remi Collet <fedora@famillecollet.com>
Date: Fri, 9 Sep 2016 17:23:27 +0200
Subject: [PATCH 2/2] install redis-check-rdb as a symlink instead of duplicating
the binary
---
src/Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/Makefile b/src/Makefile
index fdbe36a..c3083f8 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -287,6 +287,6 @@ install: all
$(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(INSTALL_BIN)
$(REDIS_INSTALL) $(REDIS_BENCHMARK_NAME) $(INSTALL_BIN)
$(REDIS_INSTALL) $(REDIS_CLI_NAME) $(INSTALL_BIN)
- $(REDIS_INSTALL) $(REDIS_CHECK_RDB_NAME) $(INSTALL_BIN)
- $(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN)
@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME)
+ @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_CHECK_RDB_NAME)
+ @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_CHECK_AOF_NAME)
--
2.13.5

@ -0,0 +1,117 @@
From 9f13b2bd4967334b1701c6eccdf53760cb13f79e Mon Sep 17 00:00:00 2001
From: John Sully <john@csquare.ca>
Date: Thu, 14 Mar 2019 14:02:16 -0400
Subject: [PATCH] Fix hyperloglog corruption
---
src/hyperloglog.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index fc21ea0065d..e993bf26e1d 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -614,6 +614,10 @@ int hllSparseToDense(robj *o) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
+ if ((runlen + idx) > HLL_REGISTERS) {
+ sdsfree(dense);
+ return C_ERR;
+ }
while(runlen--) {
HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
idx++;
@@ -1088,6 +1092,8 @@ int hllMerge(uint8_t *max, robj *hll) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
+ if ((runlen + i) > HLL_REGISTERS)
+ return C_ERR;
while(runlen--) {
if (regval > max[i]) max[i] = regval;
i++;
From e216ceaf0e099536fe3658a29dcb725d812364e0 Mon Sep 17 00:00:00 2001
From: antirez <antirez@gmail.com>
Date: Fri, 15 Mar 2019 17:16:06 +0100
Subject: [PATCH] HyperLogLog: handle wrong offset in the base case.
---
src/hyperloglog.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index 526510b43b9..1e7ce3dceb7 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -614,10 +614,7 @@ int hllSparseToDense(robj *o) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
- if ((runlen + idx) > HLL_REGISTERS) {
- sdsfree(dense);
- return C_ERR;
- }
+ if ((runlen + idx) > HLL_REGISTERS) break; /* Overflow. */
while(runlen--) {
HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
idx++;
@@ -1097,8 +1094,7 @@ int hllMerge(uint8_t *max, robj *hll) {
} else {
runlen = HLL_SPARSE_VAL_LEN(p);
regval = HLL_SPARSE_VAL_VALUE(p);
- if ((runlen + i) > HLL_REGISTERS)
- return C_ERR;
+ if ((runlen + i) > HLL_REGISTERS) break; /* Overflow. */
while(runlen--) {
if (regval > max[i]) max[i] = regval;
i++;
From 4208666797b5831eefc022ae46ab5747200cd671 Mon Sep 17 00:00:00 2001
From: antirez <antirez@gmail.com>
Date: Fri, 15 Mar 2019 13:52:29 +0100
Subject: [PATCH] HyperLogLog: dense/sparse repr parsing fuzz test.
---
tests/unit/hyperloglog.tcl | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl
index 7d36b7a351f..6a9c47b11c5 100644
--- a/tests/unit/hyperloglog.tcl
+++ b/tests/unit/hyperloglog.tcl
@@ -115,6 +115,35 @@ start_server {tags {"hll"}} {
set e
} {*WRONGTYPE*}
+ test {Fuzzing dense/sparse encoding: Redis should always detect errors} {
+ for {set j 0} {$j < 10000} {incr j} {
+ r del hll
+ set items {}
+ set numitems [randomInt 3000]
+ for {set i 0} {$i < $numitems} {incr i} {
+ lappend items [expr {rand()}]
+ }
+ r pfadd hll {*}$items
+
+ # Corrupt it in some random way.
+ for {set i 0} {$i < 5} {incr i} {
+ set len [r strlen hll]
+ set pos [randomInt $len]
+ set byte [randstring 1 1 binary]
+ r setrange hll $pos $byte
+ # Don't modify more bytes 50% of times
+ if {rand() < 0.5} break
+ }
+
+ # Use the hyperloglog to check if it crashes
+ # Redis in some way.
+ catch {
+ r pfcount hll
+ r pfdebug getreg hll
+ }
+ }
+ }
+
test {PFADD, PFCOUNT, PFMERGE type checking works} {
r set foo bar
catch {r pfadd foo 1} e

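The hyperloglog changes above all protect one invariant: while decoding a sparse run-length opcode, the run must not carry the write index past the end of the dense register array, and the register histogram must be large enough for any value the decoder can produce. A minimal standalone sketch of the bounds check follows; NUM_REGISTERS and the run encoding here are illustrative stand-ins, not Redis's HLL_* macros.

#include <stdio.h>

#define NUM_REGISTERS 16384 /* illustrative; Redis derives HLL_REGISTERS from HLL_P */

/* Decode (runlen, value) pairs into a fixed-size register array.
 * Returns 0 on success, -1 when a corrupted run would overflow the array. */
static int decode_runs(const int (*runs)[2], size_t nruns, unsigned char *regs) {
    size_t idx = 0;
    for (size_t i = 0; i < nruns; i++) {
        size_t runlen = (size_t)runs[i][0];
        unsigned char val = (unsigned char)runs[i][1];
        if (runlen + idx > NUM_REGISTERS) return -1; /* the added guard */
        while (runlen--) regs[idx++] = val;
    }
    return 0;
}

int main(void) {
    static unsigned char regs[NUM_REGISTERS];
    int good[][2] = {{100, 3}, {200, 5}};
    int bad[][2]  = {{NUM_REGISTERS, 1}, {10, 7}}; /* second run would overflow */
    printf("good: %d\n", decode_runs(good, 2, regs)); /* prints 0 */
    printf("bad:  %d\n", decode_runs(bad, 2, regs));  /* prints -1 */
    return 0;
}

The Tcl fuzz test above exercises exactly this path by corrupting random bytes of a stored HLL and checking that PFCOUNT/PFDEBUG fail cleanly instead of crashing.
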
@ -0,0 +1,27 @@
From a4b90be9fcd5e1668ac941cabce3b1ab38dbe326 Mon Sep 17 00:00:00 2001
From: antirez <antirez@gmail.com>
Date: Fri, 15 Mar 2019 17:10:16 +0100
Subject: [PATCH] HyperLogLog: enlarge reghisto variable for safety.
---
src/hyperloglog.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index e993bf26e1d..526510b43b9 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -1017,7 +1017,12 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) {
double m = HLL_REGISTERS;
double E;
int j;
- int reghisto[HLL_Q+2] = {0};
+ /* Note that reghisto could be just HLL_Q+1, becuase this is the
+ * maximum frequency of the "000...1" sequence the hash function is
+ * able to return. However it is slow to check for sanity of the
+ * input: instead we history array at a safe size: overflows will
+ * just write data to wrong, but correctly allocated, places. */
+ int reghisto[64] = {0};
/* Compute register histogram */
if (hdr->encoding == HLL_DENSE) {

@ -0,0 +1,120 @@
Backported for 5.0.3
From a4b813d8b844094fcd77c511af596866043b20c8 Mon Sep 17 00:00:00 2001
From: "meir@redislabs.com" <meir@redislabs.com>
Date: Sun, 13 Jun 2021 14:27:18 +0300
Subject: [PATCH] Fix invalid memory write on lua stack overflow
{CVE-2021-32626}
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When LUA call our C code, by default, the LUA stack has room for 20
elements. In most cases, this is more than enough but sometimes it's not
and the caller must verify the LUA stack size before he pushes elements.
On 3 places in the code, there was no verification of the LUA stack size.
On specific inputs this missing verification could have lead to invalid
memory write:
1. On 'luaReplyToRedisReply', one might return a nested reply that will
explode the LUA stack.
2. On 'redisProtocolToLuaType', the Redis reply might be deep enough
   to explode the LUA stack (notice that currently there is no such
   command in Redis that returns such a nested reply, but modules might
   do it)
3. On 'ldbRedis', one might give a command with enough arguments to
   explode the LUA stack (all the arguments will be pushed to the LUA
   stack)
This commit is solving all those 3 issues by calling 'lua_checkstack' and
verify that there is enough room in the LUA stack to push elements. In
case 'lua_checkstack' returns an error (there is not enough room in the
LUA stack and it's not possible to increase the stack), we will do the
following:
1. On 'luaReplyToRedisReply', we will return an error to the user.
2. On 'redisProtocolToLuaType' we will exit with panic (we assume this
scenario is rare because it can only happen with a module).
3. On 'ldbRedis', we return an error.
(cherry picked from commit d32a3f74f2a343846b50920e95754a955c1a10a9)
---
src/scripting.c | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/src/scripting.c b/src/scripting.c
index db1e4d4b5f1..153b942404e 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -125,6 +125,16 @@ void sha1hex(char *digest, char *script, size_t len) {
*/
char *redisProtocolToLuaType(lua_State *lua, char* reply) {
+
+ if (!lua_checkstack(lua, 5)) {
+ /*
+ * Increase the Lua stack if needed, to make sure there is enough room
+ * to push 5 elements to the stack. On failure, exit with panic.
+         * Notice that we need, in the worst case, 5 elements because redisProtocolToLuaType_Aggregate
+         * might push 5 elements to the Lua stack.*/
+ serverPanic("lua stack limit reach when parsing redis.call reply");
+ }
+
char *p = reply;
switch(*p) {
@@ -275,6 +285,17 @@ void luaSortArray(lua_State *lua) {
* ------------------------------------------------------------------------- */
void luaReplyToRedisReply(client *c, lua_State *lua) {
+
+ if (!lua_checkstack(lua, 4)) {
+ /* Increase the Lua stack if needed to make sure there is enough room
+ * to push 4 elements to the stack. On failure, return error.
+         * Notice that we need, in the worst case, 4 elements because returning a map might
+ * require push 4 elements to the Lua stack.*/
+ addReplyErrorFormat(c, "reached lua stack limit");
+ lua_pop(lua,1); // pop the element from the stack
+ return;
+ }
+
int t = lua_type(lua,-1);
switch(t) {
@@ -292,6 +313,9 @@ void luaReplyToRedisReply(client *c, lua_State *lua) {
* Error are returned as a single element table with 'err' field.
* Status replies are returned as single element table with 'ok'
* field. */
+
+ /* Handle error reply. */
+ /* we took care of the stack size on function start */
lua_pushstring(lua,"err");
lua_gettable(lua,-2);
t = lua_type(lua,-1);
@@ -320,6 +344,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) {
lua_pop(lua,1); /* Discard the 'ok' field value we popped */
while(1) {
+ /* we took care of the stack size on function start */
lua_pushnumber(lua,j++);
lua_gettable(lua,-2);
t = lua_type(lua,-1);
@@ -2231,6 +2256,17 @@ void ldbEval(lua_State *lua, sds *argv, int argc) {
void ldbRedis(lua_State *lua, sds *argv, int argc) {
int j, saved_rc = server.lua_replicate_commands;
+ if (!lua_checkstack(lua, argc + 1)) {
+ /* Increase the Lua stack if needed to make sure there is enough room
+ * to push 'argc + 1' elements to the stack. On failure, return error.
+         * Notice that we need, in worst case, 'argc + 1' elements because we push all the arguments
+         * given by the user (without the first argument) and we also push the 'redis' global table and
+         * 'redis.call' function so:
+         * (1 (redis table)) + (1 (redis.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/
+ ldbLogRedisReply("max lua stack reached");
+ return;
+ }
+
lua_getglobal(lua,"redis");
lua_pushstring(lua,"call");
lua_gettable(lua,-2); /* Stack: redis, redis.call */

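The pattern added at each of the three call sites above is the same: before pushing a known number of values, ask Lua to guarantee that much free stack space and bail out if it cannot. A minimal sketch of that pattern, assuming the Lua 5.1 development headers are available; push_many is a made-up helper, not Redis code.

#include <lua.h>
#include <lauxlib.h>
#include <stdio.h>

/* Push n integers, but only after verifying the stack can hold them. */
static int push_many(lua_State *L, int n) {
    if (!lua_checkstack(L, n)) return -1; /* cannot grow: report instead of writing past the stack */
    for (int i = 0; i < n; i++) lua_pushinteger(L, i);
    return 0;
}

int main(void) {
    lua_State *L = luaL_newstate();
    printf("small push: %d\n", push_many(L, 10));      /* fits */
    printf("huge push:  %d\n", push_many(L, 1000000)); /* refused on stock Lua builds */
    lua_close(L);
    return 0;
}

Build with something like cc demo.c -llua -lm; the exact library name varies by distribution.
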
@ -0,0 +1,775 @@
Backported for 5.0.3
From 6facfb7a103b26b9a602253a738b2130afb7c5d3 Mon Sep 17 00:00:00 2001
From: Oran Agra <oran@redislabs.com>
Date: Thu, 3 Jun 2021 12:10:02 +0300
Subject: [PATCH] Fix ziplist and listpack overflows and truncations
(CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
(cherry picked from commit 68e221a3f98a427805d31c1760b4cdf37ba810ab)
---
src/geo.c | 5 +-
src/listpack.c | 2 +-
src/quicklist.c | 17 ++++-
src/rdb.c | 36 ++++++---
src/server.h | 2 +-
src/t_hash.c | 13 +++-
src/t_list.c | 25 ++++++
src/t_stream.c | 48 +++++++++---
src/t_zset.c | 43 +++++++----
src/ziplist.c | 17 ++++-
src/ziplist.h | 1 +
tests/support/util.tcl | 21 +++++
tests/unit/violations.tcl | 156 ++++++++++++++++++++++++++++++++++++++
13 files changed, 338 insertions(+), 48 deletions(-)
create mode 100644 tests/unit/violations.tcl
diff --git a/src/geo.c b/src/geo.c
index f1d3f18d46e7..b94fcc1b3d70 100644
--- a/src/geo.c
+++ b/src/geo.c
@@ -635,7 +635,7 @@ void georadiusGeneric(client *c, int fla
robj *zobj;
zset *zs;
int i;
- size_t maxelelen = 0;
+ size_t maxelelen = 0, totelelen = 0;
if (returned_items) {
zobj = createZsetObject();
@@ -650,13 +650,14 @@ void georadiusGeneric(client *c, int fla
size_t elelen = sdslen(gp->member);
if (maxelelen < elelen) maxelelen = elelen;
+ totelelen += elelen;
znode = zslInsert(zs->zsl,score,gp->member);
serverAssert(dictAdd(zs->dict,gp->member,&znode->score) == DICT_OK);
gp->member = NULL;
}
if (returned_items) {
- zsetConvertToZiplistIfNeeded(zobj,maxelelen);
+ zsetConvertToZiplistIfNeeded(zobj,maxelelen,totelelen);
setKey(c->db,storekey,zobj);
decrRefCount(zobj);
notifyKeyspaceEvent(NOTIFY_LIST,"georadiusstore",storekey,
diff --git a/src/listpack.c b/src/listpack.c
index e1f4d9a02ee8..cd5583ccb258 100644
--- a/src/listpack.c
+++ b/src/listpack.c
@@ -283,7 +283,7 @@ int lpEncodeGetType(unsigned char *ele, uint32_t size, unsigned char *intenc, ui
} else {
if (size < 64) *enclen = 1+size;
else if (size < 4096) *enclen = 2+size;
- else *enclen = 5+size;
+ else *enclen = 5+(uint64_t)size;
return LP_ENCODING_STRING;
}
}
diff --git a/src/quicklist.c b/src/quicklist.c
index 7b5484116785..d5cc758b2fa0 100644
--- a/src/quicklist.c
+++ b/src/quicklist.c
@@ -29,6 +29,7 @@
*/
#include <string.h> /* for memcpy */
+#include "redisassert.h"
#include "quicklist.h"
#include "zmalloc.h"
#include "ziplist.h"
@@ -43,11 +44,16 @@
#define REDIS_STATIC static
#endif
-/* Optimization levels for size-based filling */
+/* Optimization levels for size-based filling.
+ * Note that the largest possible limit is 16k, so even if each record takes
+ * just one byte, it still won't overflow the 16 bit count field. */
static const size_t optimization_level[] = {4096, 8192, 16384, 32768, 65536};
/* Maximum size in bytes of any multi-element ziplist.
- * Larger values will live in their own isolated ziplists. */
+ * Larger values will live in their own isolated ziplists.
+ * This is used only if we're limited by record count. when we're limited by
+ * size, the maximum limit is bigger, but still safe.
+ * 8k is a recommended / default size limit */
#define SIZE_SAFETY_LIMIT 8192
/* Minimum ziplist size in bytes for attempting compression. */
@@ -441,6 +447,8 @@ REDIS_STATIC int _quicklistNodeAllowInsert(const quicklistNode *node,
unsigned int new_sz = node->sz + sz + ziplist_overhead;
if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(new_sz, fill)))
return 1;
+ /* when we return 1 above we know that the limit is a size limit (which is
+ * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */
else if (!sizeMeetsSafetyLimit(new_sz))
return 0;
else if ((int)node->count < fill)
@@ -460,6 +468,8 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a,
unsigned int merge_sz = a->sz + b->sz - 11;
if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(merge_sz, fill)))
return 1;
+ /* when we return 1 above we know that the limit is a size limit (which is
+ * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */
else if (!sizeMeetsSafetyLimit(merge_sz))
return 0;
else if ((int)(a->count + b->count) <= fill)
@@ -479,6 +489,7 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a,
* Returns 1 if new head created. */
int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) {
quicklistNode *orig_head = quicklist->head;
+ assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
if (likely(
_quicklistNodeAllowInsert(quicklist->head, quicklist->fill, sz))) {
quicklist->head->zl =
@@ -502,6 +513,7 @@ int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) {
* Returns 1 if new tail created. */
int quicklistPushTail(quicklist *quicklist, void *value, size_t sz) {
quicklistNode *orig_tail = quicklist->tail;
+ assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
if (likely(
_quicklistNodeAllowInsert(quicklist->tail, quicklist->fill, sz))) {
quicklist->tail->zl =
@@ -835,6 +847,7 @@ REDIS_STATIC void _quicklistInsert(quicklist *quicklist, quicklistEntry *entry,
int fill = quicklist->fill;
quicklistNode *node = entry->node;
quicklistNode *new_node = NULL;
+ assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */
if (!node) {
/* we have no reference node, so let's create only node in the list */
diff --git a/src/rdb.c b/src/rdb.c
index 3c58a1eaf7fb..c7dc724f3df6 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1452,7 +1452,7 @@ robj *rdbLoadObject(int rdbtype, rio *rd
} else if (rdbtype == RDB_TYPE_ZSET_2 || rdbtype == RDB_TYPE_ZSET) {
/* Read list/set value. */
uint64_t zsetlen;
- size_t maxelelen = 0;
+ size_t maxelelen = 0, totelelen = 0;
zset *zs;
if ((zsetlen = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
@@ -1479,6 +1479,7 @@ robj *rdbLoadObject(int rdbtype, rio *rd
/* Don't care about integer-encoded strings. */
if (sdslen(sdsele) > maxelelen) maxelelen = sdslen(sdsele);
+ totelelen += sdslen(sdsele);
znode = zslInsert(zs->zsl,score,sdsele);
dictAdd(zs->dict,sdsele,&znode->score);
@@ -1486,8 +1487,11 @@ robj *rdbLoadObject(int rdbtype, rio *rd
/* Convert *after* loading, since sorted sets are not stored ordered. */
if (zsetLength(o) <= server.zset_max_ziplist_entries &&
- maxelelen <= server.zset_max_ziplist_value)
- zsetConvert(o,OBJ_ENCODING_ZIPLIST);
+ maxelelen <= server.zset_max_ziplist_value &&
+ ziplistSafeToAdd(NULL, totelelen))
+ {
+ zsetConvert(o,OBJ_ENCODING_ZIPLIST);
+ }
} else if (rdbtype == RDB_TYPE_HASH) {
uint64_t len;
int ret;
@@ -1511,21 +1515,25 @@ robj *rdbLoadObject(int rdbtype, rio *rd
if ((value = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL))
== NULL) return NULL;
- /* Add pair to ziplist */
- o->ptr = ziplistPush(o->ptr, (unsigned char*)field,
- sdslen(field), ZIPLIST_TAIL);
- o->ptr = ziplistPush(o->ptr, (unsigned char*)value,
- sdslen(value), ZIPLIST_TAIL);
-
/* Convert to hash table if size threshold is exceeded */
if (sdslen(field) > server.hash_max_ziplist_value ||
- sdslen(value) > server.hash_max_ziplist_value)
+ sdslen(value) > server.hash_max_ziplist_value ||
+ !ziplistSafeToAdd(o->ptr, sdslen(field)+sdslen(value)))
{
- sdsfree(field);
- sdsfree(value);
hashTypeConvert(o, OBJ_ENCODING_HT);
+ ret = dictAdd((dict*)o->ptr, field, value);
+ if (ret == DICT_ERR) {
+ rdbExitReportCorruptRDB("Duplicate hash fields detected");
+ }
break;
}
+
+ /* Add pair to ziplist */
+ o->ptr = ziplistPush(o->ptr, (unsigned char*)field,
+ sdslen(field), ZIPLIST_TAIL);
+ o->ptr = ziplistPush(o->ptr, (unsigned char*)value,
+ sdslen(value), ZIPLIST_TAIL);
+
sdsfree(field);
sdsfree(value);
}
@@ -1594,6 +1602,10 @@ robj *rdbLoadObject(int rdbtype, rio *rd
while ((zi = zipmapNext(zi, &fstr, &flen, &vstr, &vlen)) != NULL) {
if (flen > maxlen) maxlen = flen;
if (vlen > maxlen) maxlen = vlen;
+ if (!ziplistSafeToAdd(zl, (size_t)flen + vlen)) {
+ rdbExitReportCorruptRDB("Hash zipmap too big (%u)", flen);
+ }
+
zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL);
zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL);
}
diff --git a/src/server.h b/src/server.h
index ca868939cf6d..164a82271f44 100644
--- a/src/server.h
+++ b/src/server.h
@@ -1677,7 +1677,7 @@ unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range);
unsigned char *zzlLastInRange(unsigned char *zl, zrangespec *range);
unsigned long zsetLength(const robj *zobj);
void zsetConvert(robj *zobj, int encoding);
-void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen);
+void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen);
int zsetScore(robj *zobj, sds member, double *score);
unsigned long zslGetRank(zskiplist *zsl, double score, sds o);
int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore);
diff --git a/src/t_hash.c b/src/t_hash.c
index 0ca152df78cc..109522c1322f 100644
--- a/src/t_hash.c
+++ b/src/t_hash.c
@@ -39,17 +39,22 @@
* as their string length can be queried in constant time. */
void hashTypeTryConversion(robj *o, robj **argv, int start, int end) {
int i;
+ size_t sum = 0;
if (o->encoding != OBJ_ENCODING_ZIPLIST) return;
for (i = start; i <= end; i++) {
- if (sdsEncodedObject(argv[i]) &&
- sdslen(argv[i]->ptr) > server.hash_max_ziplist_value)
- {
+ if (!sdsEncodedObject(argv[i]))
+ continue;
+ size_t len = sdslen(argv[i]->ptr);
+ if (len > server.hash_max_ziplist_value) {
hashTypeConvert(o, OBJ_ENCODING_HT);
- break;
+ return;
}
+ sum += len;
}
+ if (!ziplistSafeToAdd(o->ptr, sum))
+ hashTypeConvert(o, OBJ_ENCODING_HT);
}
/* Get the value from a ziplist encoded hash, identified by field.
diff --git a/src/t_list.c b/src/t_list.c
index de417f4705f4..67541554f616 100644
--- a/src/t_list.c
+++ b/src/t_list.c
@@ -29,6 +29,8 @@
#include "server.h"
+#define LIST_MAX_ITEM_SIZE ((1ull<<32)-1024)
+
/*-----------------------------------------------------------------------------
* List API
*----------------------------------------------------------------------------*/
@@ -196,6 +198,14 @@ void listTypeConvert(robj *subject, int enc) {
void pushGenericCommand(client *c, int where) {
int j, pushed = 0;
+
+ for (j = 2; j < c->argc; j++) {
+ if (sdslen(c->argv[j]->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+ }
+
robj *lobj = lookupKeyWrite(c->db,c->argv[1]);
if (lobj && lobj->type != OBJ_LIST) {
@@ -277,6 +287,11 @@ void linsertCommand(client *c) {
return;
}
+ if (sdslen(c->argv[4]->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+
if ((subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL ||
checkType(c,subject,OBJ_LIST)) return;
@@ -344,6 +359,11 @@ void lsetCommand(client *c) {
long index;
robj *value = c->argv[3];
+ if (sdslen(value->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+
if ((getLongFromObjectOrReply(c, c->argv[2], &index, NULL) != C_OK))
return;
@@ -493,6 +513,11 @@ void lremCommand(client *c) {
long toremove;
long removed = 0;
+ if (sdslen(obj->ptr) > LIST_MAX_ITEM_SIZE) {
+ addReplyError(c, "Element too large");
+ return;
+ }
+
if ((getLongFromObjectOrReply(c, c->argv[2], &toremove, NULL) != C_OK))
return;
diff --git a/src/t_stream.c b/src/t_stream.c
index d7754985dd03..e7263d68a28f 100644
--- a/src/t_stream.c
+++ b/src/t_stream.c
@@ -40,6 +40,12 @@
#define STREAM_ITEM_FLAG_DELETED (1<<0) /* Entry is delted. Skip it. */
#define STREAM_ITEM_FLAG_SAMEFIELDS (1<<1) /* Same fields as master entry. */
+/* Don't let listpacks grow too big, even if the user config allows it.
+ * doing so can lead to an overflow (trying to store more than 32bit length
+ * into the listpack header), or actually an assertion since lpInsert
+ * will return NULL. */
+#define STREAM_LISTPACK_MAX_SIZE (1<<30)
+
void streamFreeCG(streamCG *cg);
void streamFreeNACK(streamNACK *na);
size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer);
@@ -170,12 +176,31 @@ int streamCompareID(streamID *a, streamI
*
* The function returns C_OK if the item was added, this is always true
* if the ID was generated by the function. However the function may return
- * C_ERR if an ID was given via 'use_id', but adding it failed since the
- * current top ID is greater or equal. */
+ * C_ERR in several cases:
+ * 1. If an ID was given via 'use_id', but adding it failed since the
+ * current top ID is greater or equal. errno will be set to EDOM.
+ * 2. If a size of a single element or the sum of the elements is too big to
+ * be stored into the stream. errno will be set to ERANGE. */
int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_id, streamID *use_id) {
/* If an ID was given, check that it's greater than the last entry ID
* or return an error. */
- if (use_id && streamCompareID(use_id,&s->last_id) <= 0) return C_ERR;
+ if (use_id && streamCompareID(use_id,&s->last_id) <= 0) {
+ errno = EDOM;
+ return C_ERR;
+ }
+
+ /* Avoid overflow when trying to add an element to the stream (listpack
+ * can only host up to 32bit length sttrings, and also a total listpack size
+ * can't be bigger than 32bit length. */
+ size_t totelelen = 0;
+ for (int64_t i = 0; i < numfields*2; i++) {
+ sds ele = argv[i]->ptr;
+ totelelen += sdslen(ele);
+ }
+ if (totelelen > STREAM_LISTPACK_MAX_SIZE) {
+ errno = ERANGE;
+ return C_ERR;
+ }
/* Add the new entry. */
raxIterator ri;
@@ -241,9 +266,10 @@ int streamAppendItem(stream *s, robj **a
* if we need to switch to the next one. 'lp' will be set to NULL if
* the current node is full. */
if (lp != NULL) {
- if (server.stream_node_max_bytes &&
- lp_bytes > server.stream_node_max_bytes)
- {
+ size_t node_max_bytes = server.stream_node_max_bytes;
+ if (node_max_bytes == 0 || node_max_bytes > STREAM_LISTPACK_MAX_SIZE)
+ node_max_bytes = STREAM_LISTPACK_MAX_SIZE;
+ if (lp_bytes + totelelen >= node_max_bytes) {
lp = NULL;
} else if (server.stream_node_max_entries) {
int64_t count = lpGetInteger(lpFirst(lp));
@@ -1224,11 +1250,13 @@ void xaddCommand(client *c) {
/* Append using the low level function and return the ID. */
if (streamAppendItem(s,c->argv+field_pos,(c->argc-field_pos)/2,
- &id, id_given ? &id : NULL)
- == C_ERR)
+ &id, id_given ? &id : NULL) == C_ERR)
{
- addReplyError(c,"The ID specified in XADD is equal or smaller than the "
- "target stream top item");
+ if (errno == EDOM)
+ addReplyError(c,"The ID specified in XADD is equal or smaller than "
+ "the target stream top item");
+ else
+ addReplyError(c,"Elements are too large to be stored");
return;
}
addReplyStreamID(c,&id);
diff --git a/src/t_zset.c b/src/t_zset.c
index 56ea39607b52..989d5855e1ea 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -1237,15 +1237,18 @@ void zsetConvert(robj *zobj, int encodin
}
/* Convert the sorted set object into a ziplist if it is not already a ziplist
- * and if the number of elements and the maximum element size is within the
- * expected ranges. */
-void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen) {
+ * and if the number of elements and the maximum element size and total elements size
+ * are within the expected ranges. */
+void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen) {
if (zobj->encoding == OBJ_ENCODING_ZIPLIST) return;
zset *zset = zobj->ptr;
if (zset->zsl->length <= server.zset_max_ziplist_entries &&
- maxelelen <= server.zset_max_ziplist_value)
- zsetConvert(zobj,OBJ_ENCODING_ZIPLIST);
+ maxelelen <= server.zset_max_ziplist_value &&
+ ziplistSafeToAdd(NULL, totelelen))
+ {
+ zsetConvert(zobj,OBJ_ENCODING_ZIPLIST);
+ }
}
/* Return (by reference) the score of the specified member of the sorted set
@@ -1354,21 +1357,28 @@ int zsetAdd(robj *zobj, double score, sd
}
return 1;
} else if (!xx) {
- /* Optimize: check if the element is too large or the list
+ /* check if the element is too large or the list
* becomes too long *before* executing zzlInsert. */
- zobj->ptr = zzlInsert(zobj->ptr,ele,score);
- if (zzlLength(zobj->ptr) > server.zset_max_ziplist_entries)
- zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
- if (sdslen(ele) > server.zset_max_ziplist_value)
+ if (zzlLength(zobj->ptr)+1 > server.zset_max_ziplist_entries ||
+ sdslen(ele) > server.zset_max_ziplist_value ||
+ !ziplistSafeToAdd(zobj->ptr, sdslen(ele)))
+ {
zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
- if (newscore) *newscore = score;
- *flags |= ZADD_ADDED;
- return 1;
+ } else {
+ zobj->ptr = zzlInsert(zobj->ptr,ele,score);
+ if (newscore) *newscore = score;
+ *flags |= ZADD_ADDED;
+ return 1;
+ }
} else {
*flags |= ZADD_NOP;
return 1;
}
- } else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
+ }
+
+ /* Note that the above block handling ziplist would have either returned or
+ * converted the key to skiplist. */
+ if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
zset *zs = zobj->ptr;
zskiplistNode *znode;
dictEntry *de;
@@ -2180,7 +2190,7 @@ void zunionInterGenericCommand(client *c
zsetopsrc *src;
zsetopval zval;
sds tmp;
- size_t maxelelen = 0;
+ size_t maxelelen = 0, totelelen = 0;
robj *dstobj;
zset *dstzset;
zskiplistNode *znode;
@@ -2304,6 +2314,7 @@ void zunionInterGenericCommand(client *c
tmp = zuiNewSdsFromValue(&zval);
znode = zslInsert(dstzset->zsl,score,tmp);
dictAdd(dstzset->dict,tmp,&znode->score);
+ totelelen += sdslen(tmp);
if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp);
}
}
@@ -2340,6 +2351,7 @@ void zunionInterGenericCommand(client *c
/* Remember the longest single element encountered,
* to understand if it's possible to convert to ziplist
* at the end. */
+ totelelen += sdslen(tmp);
if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp);
/* Update the element with its initial score. */
dictSetKey(accumulator, de, tmp);
@@ -2380,7 +2392,7 @@ void zunionInterGenericCommand(client *c
if (dbDelete(c->db,dstkey))
touched = 1;
if (dstzset->zsl->length) {
- zsetConvertToZiplistIfNeeded(dstobj,maxelelen);
+ zsetConvertToZiplistIfNeeded(dstobj,maxelelen,totelelen);
dbAdd(c->db,dstkey,dstobj);
addReplyLongLong(c,zsetLength(dstobj));
signalModifiedKey(c->db,dstkey);
diff --git a/src/ziplist.c b/src/ziplist.c
index dbd804b11dfc..1a8566698972 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -265,6 +265,17 @@
ZIPLIST_LENGTH(zl) = intrev16ifbe(intrev16ifbe(ZIPLIST_LENGTH(zl))+incr); \
}
+/* Don't let ziplists grow over 1GB in any case, don't wanna risk overflow in
+ * zlbytes*/
+#define ZIPLIST_MAX_SAFETY_SIZE (1<<30)
+int ziplistSafeToAdd(unsigned char* zl, size_t add) {
+ size_t len = zl? ziplistBlobLen(zl): 0;
+ if (len + add > ZIPLIST_MAX_SAFETY_SIZE)
+ return 0;
+ return 1;
+}
+
+
/* We use this function to receive information about a ziplist entry.
* Note that this is not how the data is actually encoded, is just what we
* get filled by a function in order to operate more easily. */
@@ -586,7 +597,8 @@ unsigned char *ziplistNew(void) {
}
/* Resize the ziplist. */
-unsigned char *ziplistResize(unsigned char *zl, unsigned int len) {
+unsigned char *ziplistResize(unsigned char *zl, size_t len) {
+ assert(len < UINT32_MAX);
zl = zrealloc(zl,len);
ZIPLIST_BYTES(zl) = intrev32ifbe(len);
zl[len-1] = ZIP_END;
@@ -898,6 +910,9 @@ unsigned char *ziplistMerge(unsigned char **first, unsigned char **second) {
/* Combined zl length should be limited within UINT16_MAX */
zllength = zllength < UINT16_MAX ? zllength : UINT16_MAX;
+ /* larger values can't be stored into ZIPLIST_BYTES */
+ assert(zlbytes < UINT32_MAX);
+
/* Save offset positions before we start ripping memory apart. */
size_t first_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*first));
size_t second_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*second));
diff --git a/src/ziplist.h b/src/ziplist.h
index 964a47f6dc29..f6ba6c8be47d 100644
--- a/src/ziplist.h
+++ b/src/ziplist.h
@@ -49,6 +49,7 @@ unsigned char *ziplistFind(unsigned char *p, unsigned char *vstr, unsigned int v
unsigned int ziplistLen(unsigned char *zl);
size_t ziplistBlobLen(unsigned char *zl);
void ziplistRepr(unsigned char *zl);
+int ziplistSafeToAdd(unsigned char* zl, size_t add);
#ifdef REDIS_TEST
int ziplistTest(int argc, char *argv[]);
diff --git a/tests/support/util.tcl b/tests/support/util.tcl
index 74f491e483a5..46b56cc2822a 100644
--- a/tests/support/util.tcl
+++ b/tests/support/util.tcl
@@ -99,6 +99,27 @@ proc wait_for_ofs_sync {r1 r2} {
}
}
+# count current log lines in server's stdout
+proc count_log_lines {srv_idx} {
+ set _ [string trim [exec wc -l < [srv $srv_idx stdout]]]
+}
+
+# returns the number of times a line with that pattern appears in a file
+proc count_message_lines {file pattern} {
+ set res 0
+ # exec fails when grep exists with status other than 0 (when the patter wasn't found)
+ catch {
+ set res [string trim [exec grep $pattern $file 2> /dev/null | wc -l]]
+ }
+ return $res
+}
+
+# returns the number of times a line with that pattern appears in the log
+proc count_log_message {srv_idx pattern} {
+ set stdout [srv $srv_idx stdout]
+ return [count_message_lines $stdout $pattern]
+}
+
# Random integer between 0 and max (excluded).
proc randomInt {max} {
expr {int(rand()*$max)}
diff --git a/tests/unit/violations.tcl b/tests/unit/violations.tcl
new file mode 100644
index 000000000000..d87b9236528e
--- /dev/null
+++ b/tests/unit/violations.tcl
@@ -0,0 +1,156 @@
+# These tests consume massive amounts of memory, and are not
+# suitable to be executed as part of the normal test suite
+set ::str500 [string repeat x 500000000] ;# 500mb
+
+# Utility function to write big argument into redis client connection
+proc write_big_bulk {size} {
+ r write "\$$size\r\n"
+ while {$size >= 500000000} {
+ r write $::str500
+ incr size -500000000
+ }
+ if {$size > 0} {
+ r write [string repeat x $size]
+ }
+ r write "\r\n"
+}
+
+# One XADD with one huge 5GB field
+# Expected to fail resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {XADD one huge field} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
+ r write "\$1\r\nA\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r xlen S1
+ } {0}
+}
+
+# One XADD with one huge (exactly nearly) 4GB field
+# This uncovers the overflow in lpEncodeGetType
+# Expected to fail resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {XADD one huge field - 1} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
+ r write "\$1\r\nA\r\n"
+ write_big_bulk 4294967295 ;#4gb-1
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r xlen S1
+ } {0}
+}
+
+# Gradually add big stream fields using repeated XADD calls
+start_server [list overrides [list save ""] ] {
+ test {several XADD big fields} {
+ r config set stream-node-max-bytes 0
+ for {set j 0} {$j<10} {incr j} {
+ r xadd stream * 1 $::str500 2 $::str500
+ }
+ r ping
+ r xlen stream
+ } {10}
+}
+
+# Add over 4GB to a single stream listpack (one XADD command)
+# Expected to fail resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {single XADD big fields} {
+ r write "*23\r\n\$4\r\nXADD\r\n\$1\r\nS\r\n\$1\r\n*\r\n"
+ for {set j 0} {$j<10} {incr j} {
+ r write "\$1\r\n$j\r\n"
+ write_big_bulk 500000000 ;#500mb
+ }
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r xlen S
+ } {0}
+}
+
+# Gradually add big hash fields using repeated HSET calls
+# This reproduces the overflow in the call to ziplistResize
+# Object will be converted to hashtable encoding
+start_server [list overrides [list save ""] ] {
+ r config set hash-max-ziplist-value 1000000000 ;#1gb
+ test {hash with many big fields} {
+ for {set j 0} {$j<10} {incr j} {
+ r hset h $j $::str500
+ }
+ r object encoding h
+ } {hashtable}
+}
+
+# Add over 4GB to a single hash field (one HSET command)
+# Object will be converted to hashtable encoding
+start_server [list overrides [list save ""] ] {
+ test {hash with one huge field} {
+ catch {r config set hash-max-ziplist-value 10000000000} ;#10gb
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*4\r\n\$4\r\nHSET\r\n\$2\r\nH1\r\n"
+ r write "\$1\r\nA\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ r read
+ r object encoding H1
+ } {hashtable}
+}
+
+# Add over 4GB to a single list member (one LPUSH command)
+# Currently unsupported, and expected to fail rather than being truncated
+# Expected to fail resulting in a non-existing list
+start_server [list overrides [list save ""] ] {
+ test {list with one huge field} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$2\r\nL1\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r exists L1
+ } {0}
+}
+
+# SORT which attempts to store an element larger than 4GB into a list.
+# Currently unsupported and results in an assertion instead of truncation
+start_server [list overrides [list save ""] ] {
+ test {SORT adds huge field to list} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*3\r\n\$3\r\nSET\r\n\$2\r\nS1\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r flush
+ r read
+ assert_equal [r strlen S1] 5000000000
+ r set S2 asdf
+ r sadd myset 1 2
+ r mset D1 1 D2 2
+ catch {r sort myset by D* get S* store mylist}
+ # assert_equal [count_log_message 0 "crashed by signal"] 0 - not suitable for 6.0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+# SORT which stores an integer encoded element into a list.
+# Just for coverage, no news here.
+start_server [list overrides [list save ""] ] {
+ test {SORT adds integer field to list} {
+ r set S1 asdf
+ r set S2 123 ;# integer encoded
+ assert_encoding "int" S2
+ r sadd myset 1 2
+ r mset D1 1 D2 2
+ r sort myset by D* get S* store mylist
+ r llen mylist
+ } {2}
+}

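Most of the hunks above funnel through one pre-flight test, ziplistSafeToAdd: before an encoded blob grows, check that its current length plus the addition stays under a hard 1GB cap, and fall back to another encoding (or reject the command) when it does not. A standalone sketch of that check; the constant mirrors the patch but the function is simplified for illustration.

#include <stdio.h>
#include <stddef.h>

#define SAFETY_SIZE ((size_t)1 << 30) /* 1GB cap, as in ZIPLIST_MAX_SAFETY_SIZE */

/* Return 1 if adding `add` bytes to a blob of `cur` bytes stays under the cap.
 * Written so the comparison itself cannot overflow size_t. */
static int safe_to_add(size_t cur, size_t add) {
    return add <= SAFETY_SIZE && cur <= SAFETY_SIZE - add;
}

int main(void) {
    printf("%d\n", safe_to_add(100, 1000));             /* 1: fine */
    printf("%d\n", safe_to_add(SAFETY_SIZE - 10, 100)); /* 0: would cross the cap */
    return 0;
}
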
@ -0,0 +1,69 @@
From 71be97294abf3657710a044157ebbc8a21489da3 Mon Sep 17 00:00:00 2001
From: Oran Agra <oran@redislabs.com>
Date: Wed, 9 Jun 2021 17:31:39 +0300
Subject: [PATCH] Prevent unauthenticated client from easily consuming lots of
memory (CVE-2021-32675)
This change sets a low limit for multibulk and bulk length in the
protocol for unauthenticated connections, so that they can't easily
cause redis to allocate massive amounts of memory by sending just a few
characters on the network.
The new limits are 10 arguments of 16kb each (instead of 1m of 512mb)
(cherry picked from commit 3d221e81f3b680543e34942579af190b049ff283)
---
src/networking.c | 8 ++++++++
tests/unit/auth.tcl | 16 ++++++++++++++++
2 files changed, 24 insertions(+)
diff --git a/src/networking.c b/src/networking.c
index bfaded9b4d0..2b8588094d2 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -1309,6 +1309,10 @@ int processMultibulkBuffer(client *c) {
addReplyError(c,"Protocol error: invalid multibulk length");
setProtocolError("invalid mbulk count",c);
return C_ERR;
+ } else if (ll > 10 && server.requirepass && !c->authenticated) {
+ addReplyError(c, "Protocol error: unauthenticated multibulk length");
+ setProtocolError("unauth mbulk count", c);
+ return C_ERR;
}
c->qb_pos = (newline-c->querybuf)+2;
@@ -1354,6 +1358,10 @@ int processMultibulkBuffer(client *c) {
addReplyError(c,"Protocol error: invalid bulk length");
setProtocolError("invalid bulk length",c);
return C_ERR;
+ } else if (ll > 16384 && server.requirepass && !c->authenticated) {
+ addReplyError(c, "Protocol error: unauthenticated bulk length");
+ setProtocolError("unauth bulk length", c);
+ return C_ERR;
}
c->qb_pos = newline-c->querybuf+2;
diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl
index 633cda95c92..f5da728e845 100644
--- a/tests/unit/auth.tcl
+++ b/tests/unit/auth.tcl
@@ -24,4 +24,20 @@ start_server {tags {"auth"} overrides {requirepass foobar}} {
r set foo 100
r incr foo
} {101}
+
+ test {For unauthenticated clients multibulk and bulk length are limited} {
+ set rr [redis [srv "host"] [srv "port"] 0]
+ $rr write "*100\r\n"
+ $rr flush
+ catch {[$rr read]} e
+ assert_match {*unauthenticated multibulk length*} $e
+ $rr close
+
+ set rr [redis [srv "host"] [srv "port"] 0]
+ $rr write "*1\r\n\$100000000\r\n"
+ $rr flush
+ catch {[$rr read]} e
+ assert_match {*unauthenticated bulk length*} $e
+ $rr close
+ }
}

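The two networking hunks above apply the same rule in both parser paths: a connection that has not authenticated yet may only declare a tiny multibulk count (10) and bulk size (16KB), so a handful of protocol bytes can no longer force a large allocation. A small sketch of that decision; the limits are taken from the patch, but the helper itself is invented for illustration.

#include <stdio.h>

/* Decide whether declared RESP lengths are acceptable for this client. */
static int declared_len_ok(long long nargs, long long bulklen, int authenticated) {
    if (!authenticated) {
        if (nargs > 10) return 0;      /* pre-auth multibulk limit */
        if (bulklen > 16384) return 0; /* pre-auth bulk limit */
    }
    return 1;
}

int main(void) {
    printf("%d\n", declared_len_ok(100, 1024, 1)); /* 1: already authenticated */
    printf("%d\n", declared_len_ok(100, 1024, 0)); /* 0: refused before AUTH */
    return 0;
}
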
@ -0,0 +1,73 @@
Backported for 5.0.3
From c043ba77cf9bbf73e964fd9b8681c0cc4bd2662e Mon Sep 17 00:00:00 2001
From: Oran Agra <oran@redislabs.com>
Date: Sun, 26 Sep 2021 15:42:17 +0300
Subject: [PATCH] Fix Integer overflow issue with intsets (CVE-2021-32687)
The vulnerability involves changing the default set-max-intset-entries
configuration parameter to a very large value and constructing specially
crafted commands to manipulate sets
(cherry picked from commit 4cb7075edaaf0584c74eb080d838ca8f56c190e3)
---
src/intset.c | 4 +++-
src/rdb.c | 4 +++-
src/t_set.c | 5 ++++-
3 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/src/intset.c b/src/intset.c
index 4445a5ca6c56..288e19adff18 100644
--- a/src/intset.c
+++ b/src/intset.c
@@ -34,6 +34,7 @@
#include "intset.h"
#include "zmalloc.h"
#include "endianconv.h"
+#include "redisassert.h"
/* Note that these encodings are ordered, so:
* INTSET_ENC_INT16 < INTSET_ENC_INT32 < INTSET_ENC_INT64. */
@@ -103,7 +104,8 @@ intset *intsetNew(void) {
/* Resize the intset */
static intset *intsetResize(intset *is, uint32_t len) {
- uint32_t size = len*intrev32ifbe(is->encoding);
+ uint64_t size = (uint64_t)len*intrev32ifbe(is->encoding);
+ assert(size <= SIZE_MAX - sizeof(intset));
is = zrealloc(is,sizeof(intset)+size);
return is;
}
diff --git a/src/rdb.c b/src/rdb.c
index afbbd8ca450c..3c58a1eaf7fb 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1411,7 +1411,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) {
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
/* Use a regular set when there are too many entries. */
- if (len > server.set_max_intset_entries) {
+ size_t max_entries = server.set_max_intset_entries;
+ if (max_entries >= 1<<30) max_entries = 1<<30;
+ if (len > max_entries) {
o = createSetObject();
/* It's faster to expand the dict to the right size asap in order
* to avoid rehashing */
diff --git a/src/t_set.c b/src/t_set.c
index f67073fe6bb1..db5a8cb757bb 100644
--- a/src/t_set.c
+++ b/src/t_set.c
@@ -66,7 +66,10 @@ int setTypeAdd(robj *subject, sds value) {
if (success) {
/* Convert to regular set when the intset contains
* too many entries. */
- if (intsetLen(subject->ptr) > server.set_max_intset_entries)
+ size_t max_entries = server.set_max_intset_entries;
+ /* limit to 1G entries due to intset internals. */
+ if (max_entries >= 1<<30) max_entries = 1<<30;
+ if (intsetLen(subject->ptr) > max_entries)
setTypeConvert(subject,OBJ_ENCODING_HT);
return 1;
}

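The intset fix is an overflow-safe size computation: do the length-times-element-size multiplication in 64 bits and check the result before reallocating, instead of letting a 32-bit product wrap around. A minimal sketch of the idea; the header size and function name are illustrative, not intsetResize itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Grow a buffer holding `len` elements of `esize` bytes each plus a small header. */
static void *grow(void *p, uint32_t len, uint32_t esize) {
    uint64_t size = (uint64_t)len * esize; /* 64-bit math: no 32-bit wraparound */
    assert(size <= SIZE_MAX - 16);         /* must still fit in size_t with the header */
    return realloc(p, (size_t)size + 16);
}

int main(void) {
    void *p = grow(NULL, 1000, 8);
    printf("allocated: %p\n", p);
    free(p);
    return 0;
}
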
@ -0,0 +1,94 @@
Backported for 5.0.3
From 48f04a82a0ac542341fb644a4cfbebadd5c59a33 Mon Sep 17 00:00:00 2001
From: Yossi Gottlieb <yossigo@gmail.com>
Date: Mon, 22 Feb 2021 15:41:32 +0200
Subject: [PATCH] Fix integer overflow (CVE-2021-21309). (#8522)
On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result with integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).
This fix has two parts:
Set a reasonable limit to the config parameter.
Add additional checks to prevent the problem in other potential but unknown code paths.
(cherry picked from commit d32f2e9999ce003bad0bd2c3bca29f64dcce4433)
Fix MSVR reported issue.
---
src/config.c | 16 ++++++++--------
src/sds.c | 3 +++
src/zmalloc.c | 10 ++++++++++
3 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/src/sds.c b/src/sds.c
index cd60946bdd32..12c9da356d9b 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -96,6 +96,7 @@ sds sdsnewlen(const void *init, size_t initlen) {
int hdrlen = sdsHdrSize(type);
unsigned char *fp; /* flags pointer. */
+ assert(hdrlen+initlen+1 > initlen); /* Catch size_t overflow */
sh = s_malloc(hdrlen+initlen+1);
if (init==SDS_NOINIT)
init = NULL;
@@ -214,6 +215,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
len = sdslen(s);
sh = (char*)s-sdsHdrSize(oldtype);
newlen = (len+addlen);
+ assert(newlen > len); /* Catch size_t overflow */
if (newlen < SDS_MAX_PREALLOC)
newlen *= 2;
else
@@ -227,6 +229,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
if (type == SDS_TYPE_5) type = SDS_TYPE_8;
hdrlen = sdsHdrSize(type);
+ assert(hdrlen+newlen+1 > len); /* Catch size_t overflow */
if (oldtype==type) {
newsh = s_realloc(sh, hdrlen+newlen+1);
if (newsh == NULL) return NULL;
From 2b0ac7427ba5a6e1bc89380e960b138af893bbdd Mon Sep 17 00:00:00 2001
From: YiyuanGUO <yguoaz@gmail.com>
Date: Wed, 29 Sep 2021 10:20:35 +0300
Subject: [PATCH] Fix integer overflow in _sdsMakeRoomFor (CVE-2021-41099)
---
src/sds.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/sds.c b/src/sds.c
index 12c9da356d9b..73d9807ae3c0 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -205,7 +205,7 @@ void sdsclear(sds s) {
sds sdsMakeRoomFor(sds s, size_t addlen) {
void *sh, *newsh;
size_t avail = sdsavail(s);
- size_t len, newlen;
+ size_t len, newlen, reqlen;
char type, oldtype = s[-1] & SDS_TYPE_MASK;
int hdrlen;
@@ -214,7 +214,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
len = sdslen(s);
sh = (char*)s-sdsHdrSize(oldtype);
- newlen = (len+addlen);
+ reqlen = newlen = (len+addlen);
assert(newlen > len); /* Catch size_t overflow */
if (newlen < SDS_MAX_PREALLOC)
newlen *= 2;
@@ -229,7 +229,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
if (type == SDS_TYPE_5) type = SDS_TYPE_8;
hdrlen = sdsHdrSize(type);
- assert(hdrlen+newlen+1 > len); /* Catch size_t overflow */
+ assert(hdrlen + newlen + 1 > reqlen); /* Catch size_t overflow */
if (oldtype==type) {
newsh = s_realloc(sh, hdrlen+newlen+1);
if (newsh == NULL) return NULL;

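Both sds patches guard the same arithmetic: when a string of len bytes needs addlen more, neither the sum nor the later header math may wrap around size_t. A simplified sketch of that growth calculation with the overflow asserts in place; the preallocation policy here is a stand-in, not sds's exact rule.

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

/* Compute the new allocation length, trapping size_t wraparound as the patches do. */
static size_t grow_len(size_t len, size_t addlen) {
    size_t reqlen = len + addlen;
    assert(reqlen > len);               /* catch size_t overflow of len+addlen */
    size_t newlen = reqlen;
    if (newlen < 1024 * 1024) newlen *= 2; else newlen += 1024 * 1024;
    assert(newlen + 1 > reqlen);        /* header/terminator math must not wrap either */
    return newlen;
}

int main(void) {
    printf("%zu\n", grow_len(10, 20)); /* 60 with this toy policy */
    /* grow_len(SIZE_MAX - 5, 10) would trip the first assert instead of wrapping. */
    return 0;
}
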
@ -0,0 +1,6 @@
# If you need to change max open file limit
# for example, when you change maxclient in configuration
# you can change the value below
# see "man limits.conf" for information
redis soft nofile 10240
redis hard nofile 10240

@ -1,14 +1,7 @@
# If you need to change max open file limit
# for example, when you change maxclient in configuration
-# you can change the LimitNOFILE value below.
-# See "man systemd.exec" for more information.
-# Slave nodes on large system may take lot of time to start.
-# You may need to uncomment TimeoutStartSec and TimeoutStopSec
-# directives below and raise their value.
-# See "man systemd.service" for more information.
+# you can change the LimitNOFILE value below
+# see "man systemd.exec" for information
[Service]
LimitNOFILE=10240
-#TimeoutStartSec=90s
-#TimeoutStopSec=90s

@ -0,0 +1,94 @@
#!/bin/sh
#
# redis init file for starting up the redis-sentinel daemon
#
# chkconfig: - 21 79
# description: Starts and stops the redis-sentinel daemon.
#
### BEGIN INIT INFO
# Provides: redis-sentinel
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs $remote_fs $network
# Short-Description: start and stop Sentinel server
# Description: A persistent key-value database
### END INIT INFO
# Source function library.
. /etc/rc.d/init.d/functions
name="redis-sentinel"
exec="/usr/bin/$name"
shut="/usr/libexec/redis-shutdown"
pidfile="/var/run/redis/sentinel.pid"
SENTINEL_CONFIG="/etc/redis-sentinel.conf"
[ -e /etc/sysconfig/redis-sentinel ] && . /etc/sysconfig/redis-sentinel
lockfile=/var/lock/subsys/redis
start() {
[ -f $SENTINEL_CONFIG ] || exit 6
[ -x $exec ] || exit 5
echo -n $"Starting $name: "
daemon --user ${REDIS_USER-redis} "$exec $SENTINEL_CONFIG --daemonize yes --pidfile $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $name: "
[ -x $shut ] && $shut $name
retval=$?
if [ -f $pidfile ]
then
# shutdown haven't work, try old way
killproc -p $pidfile $name
retval=$?
else
success "$name shutdown"
fi
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
rh_status() {
status -p $pidfile $name
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart}"
exit 2
esac
exit $?

@ -1,12 +1,10 @@
[Unit]
Description=Redis Sentinel
After=network.target
-After=network-online.target
-Wants=network-online.target
[Service]
-ExecStart=/usr/bin/redis-sentinel /etc/redis/sentinel.conf --daemonize no --supervised systemd
-ExecStop=/usr/libexec/redis-shutdown sentinel
+ExecStart=/usr/bin/redis-sentinel /etc/redis-sentinel.conf --supervised systemd
+ExecStop=/usr/libexec/redis-shutdown redis-sentinel
Type=notify
User=redis
Group=redis

@ -12,7 +12,7 @@ if [ -z "$SERVICE_NAME" ]; then
fi
# Get the proper config file based on service name
-CONFIG_FILE="/etc/redis/$SERVICE_NAME.conf"
+CONFIG_FILE="/etc/$SERVICE_NAME.conf"
# Use awk to retrieve host, port from config file
HOST=`awk '/^[[:blank:]]*bind/ { print $2 }' $CONFIG_FILE | tail -n1`

@ -0,0 +1,94 @@
#!/bin/sh
#
# redis init file for starting up the redis daemon
#
# chkconfig: - 20 80
# description: Starts and stops the redis daemon.
#
### BEGIN INIT INFO
# Provides: redis-server
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs $remote_fs $network
# Short-Description: start and stop Redis server
# Description: A persistent key-value database
### END INIT INFO
# Source function library.
. /etc/rc.d/init.d/functions
name="redis-server"
exec="/usr/bin/$name"
shut="/usr/libexec/redis-shutdown"
pidfile="/var/run/redis/redis.pid"
REDIS_CONFIG="/etc/redis.conf"
[ -e /etc/sysconfig/redis ] && . /etc/sysconfig/redis
lockfile=/var/lock/subsys/redis
start() {
[ -f $REDIS_CONFIG ] || exit 6
[ -x $exec ] || exit 5
echo -n $"Starting $name: "
daemon --user ${REDIS_USER-redis} "$exec $REDIS_CONFIG --daemonize yes --pidfile $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $name: "
[ -x $shut ] && $shut
retval=$?
if [ -f $pidfile ]
then
# shutdown haven't work, try old way
killproc -p $pidfile $name
retval=$?
else
success "$name shutdown"
fi
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
rh_status() {
status -p $pidfile $name
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart}"
exit 2
esac
exit $?

@ -1,11 +1,9 @@
[Unit]
Description=Redis persistent key-value database
After=network.target
-After=network-online.target
-Wants=network-online.target
[Service]
-ExecStart=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no --supervised systemd
+ExecStart=/usr/bin/redis-server /etc/redis.conf --supervised systemd
ExecStop=/usr/libexec/redis-shutdown
Type=notify
User=redis

@ -1,5 +1,5 @@
#
-# Fedora spec file for redis
+# RHEL / Fedora spec file for redis
#
# License: MIT
# http://opensource.org/licenses/MIT
@ -8,32 +8,35 @@
#
# Tests fail in mock, not in local build.
-%bcond_with tests
+%global with_tests 0%{?_with_tests:1}
# Commit IDs for the (unversioned) redis-doc repository
# https://fedoraproject.org/wiki/Packaging:SourceURL "Commit Revision"
-%global doc_commit 3fdb6df44ecd5c4d99ea52a0133177f5ebc24805
+%global doc_commit a1e79fc9b2f42f04a8ab59c05c3228931adcd0a6
%global short_doc_commit %(c=%{doc_commit}; echo ${c:0:7})
# %%{rpmmacrodir} not usable on EL-6
%global macrosdir %(d=%{_rpmconfigdir}/macros.d; [ -d $d ] || d=%{_sysconfdir}/rpm; echo $d)
Name: redis
-Version: 6.2.7
-Release: 1%{?dist}
+Version: 5.0.3
+Release: 5%{?dist}
Summary: A persistent key-value database
# redis, jemalloc, linenoise, lzf, hiredis are BSD
# lua is MIT
License: BSD and MIT
-URL: https://redis.io
-Source0: https://download.redis.io/releases/%{name}-%{version}.tar.gz
+URL: http://redis.io
+Source0: http://download.redis.io/releases/%{name}-%{version}.tar.gz
Source1: %{name}.logrotate
Source2: %{name}-sentinel.service
Source3: %{name}.service
+Source4: %{name}-sentinel.init
+Source5: %{name}.init
Source6: %{name}-shutdown
Source7: %{name}-limit-systemd
+Source8: %{name}-limit-init
Source9: macros.%{name}
-Source10: https://github.com/%{name}/%{name}-doc/archive/%{doc_commit}/%{name}-doc-%{short_doc_commit}.tar.gz
+Source10: https://github.com/antirez/%{name}-doc/archive/%{doc_commit}/%{name}-doc-%{short_doc_commit}.tar.gz
# To refresh patches:
# tar xf redis-xxx.tar.gz && cd redis-xxx && git init && git add . && git commit -m "%%{version} baseline"
@ -41,20 +44,25 @@ Source10: https://github.com/%{name}/%{name}-doc/archive/%{doc_commit}/
# Then refresh your patches
# git format-patch HEAD~<number of expected patches>
# Update configuration for Fedora
-# https://github.com/redis/redis/pull/3491 - man pages
+# https://github.com/antirez/redis/pull/3491 - man pages
Patch0001: 0001-1st-man-pageis-for-redis-cli-redis-benchmark-redis-c.patch
+# https://github.com/antirez/redis/pull/3494 - symlink
+Patch0002: 0002-install-redis-check-rdb-as-a-symlink-instead-of-dupl.patch
-BuildRequires: make
-BuildRequires: gcc
-%if %{with tests}
+# Security patches
+Patch100: redis-CVE-2019-10192.patch
+Patch101: redis-CVE-2019-10193.patch
+Patch102: redis-CVE-2021-41099.patch
+Patch103: redis-CVE-2021-32687.patch
+Patch104: redis-CVE-2021-32626.patch
+Patch105: redis-CVE-2021-32627.patch
+Patch106: redis-CVE-2021-32675.patch
+%if 0%{?with_tests}
BuildRequires: procps-ng
BuildRequires: tcl
%endif
-BuildRequires: pkgconfig(libsystemd)
-BuildRequires: openssl-devel
-# redis-trib functionality migrated to redis-cli
-Obsoletes: redis-trib < 5
+BuildRequires: systemd
+BuildRequires: systemd-devel
# Required for redis-shutdown
Requires: /bin/awk
Requires: logrotate
@ -62,14 +70,10 @@ Requires(pre): shadow-utils
Requires(post): systemd Requires(post): systemd
Requires(preun): systemd Requires(preun): systemd
Requires(postun): systemd Requires(postun): systemd
# from deps/hiredis/hiredis.h Provides: bundled(hiredis)
Provides: bundled(hiredis) = 0.14.0 Provides: bundled(lua-libs)
# from deps/jemalloc/VERSION Provides: bundled(linenoise)
Provides: bundled(jemalloc) = 5.1.0 Provides: bundled(jemalloc) = 4.0.3
# from deps/lua/src/lua.h
Provides: bundled(lua-libs) = 5.1.5
# from deps/linenoise/linenoise.h
Provides: bundled(linenoise) = 1.0
Provides: bundled(lzf) Provides: bundled(lzf)
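The versions attached to the bundled() Provides on the c9 side are read from the bundled sources named in the comments. A sketch of how to confirm them from an unpacked tarball (the defines shown are the usual upstream ones and worth re-checking against the tree at hand):

  grep -E 'HIREDIS_(MAJOR|MINOR|PATCH)' deps/hiredis/hiredis.h   # 0.14.0
  cat deps/jemalloc/VERSION                                      # 5.1.0-...
  grep LUA_RELEASE deps/lua/src/lua.h                            # "Lua 5.1.5"
  sed -n '1,5p' deps/linenoise/linenoise.h                       # version noted in the header comment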
%global redis_modules_abi 1 %global redis_modules_abi 1
@ -128,14 +132,23 @@ administration and development.
%setup -q %setup -q
mv ../%{name}-doc-%{doc_commit} doc mv ../%{name}-doc-%{doc_commit} doc
%patch0001 -p1 %patch0001 -p1
%patch0002 -p1
%patch100 -p1 -b .cve-2019-10192
%patch101 -p1 -b .cve-2019-10193
%patch102 -p1 -b .cve-2021-41099
%patch103 -p1 -b .cve-2021-32687
%patch104 -p1 -b .cve-2021-32626
%patch105 -p1 -b .cve-2021-32627
%patch106 -p1 -b .cve-2021-32675
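On the c8-stream-5 side each security patch is applied with -b and a suffix, which makes %patch keep a pre-patch copy of every file it touches. An illustrative result for the first CVE patch (the file name is an example, not read from the patch itself):

  src/hyperloglog.c                    # patched source
  src/hyperloglog.c.cve-2019-10192     # untouched backup left beside it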
mv deps/lua/COPYRIGHT COPYRIGHT-lua mv deps/lua/COPYRIGHT COPYRIGHT-lua
mv deps/jemalloc/COPYING COPYING-jemalloc mv deps/jemalloc/COPYING COPYING-jemalloc
mv deps/hiredis/COPYING COPYING-hiredis mv deps/hiredis/COPYING COPYING-hiredis
# Configuration file changes # Configuration file changes and additions
sed -i -e 's|^logfile .*$|logfile /var/log/redis/redis.log|g' redis.conf sed -i -e 's|^logfile .*$|logfile /var/log/redis/redis.log|g' redis.conf
sed -i -e 's|^logfile .*$|logfile /var/log/redis/sentinel.log|g' sentinel.conf sed -i -e '$ alogfile /var/log/redis/sentinel.log' sentinel.conf
sed -i -e 's|^dir .*$|dir /var/lib/redis|g' redis.conf sed -i -e 's|^dir .*$|dir /var/lib/redis|g' redis.conf
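The sed edits above rewrite single directives in place (the c8 side appends the sentinel logfile line instead of substituting it). A quick post-%prep check, assuming the stock upstream configuration files:

  grep -E '^(logfile|dir) ' redis.conf
  #   logfile /var/log/redis/redis.log
  #   dir /var/lib/redis
  grep '^logfile ' sentinel.conf
  #   logfile /var/log/redis/sentinel.log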
# Module API version safety check # Module API version safety check
@ -146,10 +159,10 @@ if test "$api" != "%{redis_modules_abi}"; then
exit 1 exit 1
fi fi
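Only the tail of the module API safety check is visible in this hunk. The general shape of such a guard, comparing the API version exported by src/redismodule.h with the %{redis_modules_abi} macro defined earlier, is sketched below; the exact extraction command is not shown in the diff, so treat this as an approximation:

  api=$(sed -n 's/^#define REDISMODULE_APIVER_1 \([0-9][0-9]*\).*/\1/p' src/redismodule.h)
  if test "$api" != "1"; then   # 1 stands in for %{redis_modules_abi}
      echo "Upstream module API version changed to $api, update the spec" >&2
      exit 1
  fi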
%global make_flags DEBUG="" V="echo" LDFLAGS="%{?__global_ldflags}" CFLAGS+="%{optflags} -fPIC" INSTALL="install -p" PREFIX=%{buildroot}%{_prefix} BUILD_WITH_SYSTEMD=yes BUILD_TLS=yes %global make_flags DEBUG="" V="echo" LDFLAGS="%{?__global_ldflags}" CFLAGS+="%{optflags} -fPIC" INSTALL="install -p" PREFIX=%{buildroot}%{_prefix}
%build %build
%make_build %{make_flags} all make %{?_smp_mflags} %{make_flags} all
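With the make_flags definition above, %make_build on the c9 side expands to a single make invocation along these lines (parallelism and compiler flags come from the build environment; the values below are typical placeholders):

  make -j4 DEBUG="" V="echo" \
       LDFLAGS="$RPM_LD_FLAGS" CFLAGS+="$RPM_OPT_FLAGS -fPIC" \
       INSTALL="install -p" PREFIX=$RPM_BUILD_ROOT/usr \
       BUILD_WITH_SYSTEMD=yes BUILD_TLS=yes all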
%install %install
make %{make_flags} install make %{make_flags} install
@ -164,8 +177,8 @@ install -d %{buildroot}%{redis_modules_dir}
install -pDm644 %{S:1} %{buildroot}%{_sysconfdir}/logrotate.d/%{name} install -pDm644 %{S:1} %{buildroot}%{_sysconfdir}/logrotate.d/%{name}
# Install configuration files. # Install configuration files.
install -pDm640 %{name}.conf %{buildroot}%{_sysconfdir}/%{name}/%{name}.conf install -pDm640 %{name}.conf %{buildroot}%{_sysconfdir}/%{name}.conf
install -pDm640 sentinel.conf %{buildroot}%{_sysconfdir}/%{name}/sentinel.conf install -pDm640 sentinel.conf %{buildroot}%{_sysconfdir}/%{name}-sentinel.conf
# Install systemd unit files. # Install systemd unit files.
mkdir -p %{buildroot}%{_unitdir} mkdir -p %{buildroot}%{_unitdir}
@ -208,8 +221,8 @@ mkdir -p %{buildroot}%{macrosdir}
install -pDm644 %{S:9} %{buildroot}%{macrosdir}/macros.%{name} install -pDm644 %{S:9} %{buildroot}%{macrosdir}/macros.%{name}
%check %check
%if %{with tests} %if 0%{?with_tests}
# https://github.com/redis/redis/issues/1417 (for "taskset -c 1") # https://github.com/antirez/redis/issues/1417 (for "taskset -c 1")
taskset -c 1 make %{make_flags} test taskset -c 1 make %{make_flags} test
make %{make_flags} test-sentinel make %{make_flags} test-sentinel
%endif %endif
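taskset -c 1 pins the whole test run to a single CPU, which is the workaround referenced by the issue link above. The same pinned run can be reproduced by hand from an unpacked source tree (sketch; the top-level Makefile forwards these targets to src/):

  cd redis-6.2.7
  taskset -c 1 make test      # main suite, pinned to CPU 1 as in the spec
  make test-sentinel          # sentinel suite, not pinned in the spec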
@ -223,25 +236,6 @@ useradd -r -g %{name} -d %{_sharedstatedir}/%{name} -s /sbin/nologin \
exit 0 exit 0
%post %post
if [ -f %{_sysconfdir}/%{name}.conf -a ! -L %{_sysconfdir}/%{name}.conf ]; then
if [ -f %{_sysconfdir}/%{name}/%{name}.conf.rpmnew ]; then
rm %{_sysconfdir}/%{name}/%{name}.conf.rpmnew
fi
if [ -f %{_sysconfdir}/%{name}/%{name}.conf ]; then
mv %{_sysconfdir}/%{name}/%{name}.conf %{_sysconfdir}/%{name}/%{name}.conf.rpmnew
fi
mv %{_sysconfdir}/%{name}.conf %{_sysconfdir}/%{name}/%{name}.conf
echo -e "\nWarning: %{name} configuration is now in %{_sysconfdir}/%{name} directory\n"
fi
if [ -f %{_sysconfdir}/%{name}-sentinel.conf -a ! -L %{_sysconfdir}/%{name}-sentinel.conf ]; then
if [ -f %{_sysconfdir}/%{name}/sentinel.conf.rpmnew ]; then
rm %{_sysconfdir}/%{name}/sentinel.conf.rpmnew
fi
if [ -f %{_sysconfdir}/%{name}/sentinel.conf ]; then
mv %{_sysconfdir}/%{name}/sentinel.conf %{_sysconfdir}/%{name}/sentinel.conf.rpmnew
fi
mv %{_sysconfdir}/%{name}-sentinel.conf %{_sysconfdir}/%{name}/sentinel.conf
fi
%systemd_post %{name}.service %systemd_post %{name}.service
%systemd_post %{name}-sentinel.service %systemd_post %{name}-sentinel.service
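On the c9 side the %post scriptlet above migrates any pre-existing flat configuration files into the new directory before the preset macros run; the layout it aims for is roughly this (a sketch, not output captured from a real upgrade):

  /etc/redis/redis.conf          # live config, moved from /etc/redis.conf when one existed
  /etc/redis/redis.conf.rpmnew   # freshly packaged default, kept for reference
  /etc/redis/sentinel.conf       # moved from /etc/redis-sentinel.conf

The %systemd_post calls that follow on both sides then apply the distribution preset policy (normally a systemctl preset) to redis.service and redis-sentinel.service.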
@ -260,9 +254,8 @@ fi
%license COPYING-jemalloc %license COPYING-jemalloc
%license COPYING-hiredis %license COPYING-hiredis
%config(noreplace) %{_sysconfdir}/logrotate.d/%{name} %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}
%attr(0750, redis, root) %dir %{_sysconfdir}/%{name} %attr(0640, redis, root) %config(noreplace) %{_sysconfdir}/%{name}.conf
%attr(0640, redis, root) %config(noreplace) %{_sysconfdir}/%{name}/%{name}.conf %attr(0640, redis, root) %config(noreplace) %{_sysconfdir}/%{name}-sentinel.conf
%attr(0640, redis, root) %config(noreplace) %{_sysconfdir}/%{name}/sentinel.conf
%dir %attr(0750, redis, redis) %{_libdir}/%{name} %dir %attr(0750, redis, redis) %{_libdir}/%{name}
%dir %attr(0750, redis, redis) %{redis_modules_dir} %dir %attr(0750, redis, redis) %{redis_modules_dir}
%dir %attr(0750, redis, redis) %{_sharedstatedir}/%{name} %dir %attr(0750, redis, redis) %{_sharedstatedir}/%{name}
@ -283,174 +276,54 @@ fi
%dir %attr(0755, redis, redis) %ghost %{_localstatedir}/run/%{name} %dir %attr(0755, redis, redis) %ghost %{_localstatedir}/run/%{name}
%files devel %files devel
# main package is not required
%license COPYING %license COPYING
%{_includedir}/%{name}module.h %{_includedir}/%{name}module.h
%{macrosdir}/* %{macrosdir}/*
%files doc %files doc
# specific for documentation (CC-BY-SA)
%license doc/LICENSE
%docdir %{_docdir}/%{name} %docdir %{_docdir}/%{name}
%{_docdir}/%{name} %{_docdir}/%{name}
%changelog %changelog
* Tue May 10 2022 Remi Collet <rcollet@redhat.com> - 6.2.7-1 * Mon Oct 11 2021 Remi Collet <rcollet@redhat.com> - 5.0.3-5
- rebase to 6.2.7 #2083151 - fix denial of service via Redis Standard Protocol (RESP) request
CVE-2021-32675
* Wed Nov 3 2021 Remi Collet <remi@remirepo.net> - 6.2.6-1
- rebase to 6.2.6 #2013992 * Thu Oct 7 2021 Remi Collet <rcollet@redhat.com> - 5.0.3-4
- refresh documentation - fix lua scripts can overflow the heap-based Lua stack
- use proper license file for documentation CVE-2021-32626
- fix integer overflow issue with Streams
* Tue Aug 10 2021 Mohan Boddu <mboddu@redhat.com> - 6.2.3-3 CVE-2021-32627
- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags - fix integer overflow bug in the ziplist data structure
Related: rhbz#1991688 CVE-2021-32628
- fix integer overflow issue with intsets
* Wed Jun 16 2021 Mohan Boddu <mboddu@redhat.com> - 6.2.3-2 CVE-2021-32687
- Rebuilt for RHEL 9 BETA for openssl 3.0 - fix integer overflow issue with strings
Related: rhbz#1971065 CVE-2021-41099
* Tue May 4 2021 Remi Collet <remi@remirepo.net> - 6.2.3-1 * Thu Jul 11 2019 Remi Collet <rcollet@redhat.com> - 5.0.3-2
- Upstream 6.2.3 release - fix Heap buffer overflow in HyperLogLog triggered by malicious client
CVE-2019-10192
* Tue Apr 20 2021 Remi Collet <remi@remirepo.net> - 6.2.2-1 - fix Stack buffer overflow in HyperLogLog triggered by malicious client
- Upstream 6.2.2 release CVE-2019-10193
* Fri Apr 16 2021 Mohan Boddu <mboddu@redhat.com> - 6.2.1-2 * Thu Dec 13 2018 Remi Collet <rcollet@redhat.com> - 5.0.3-1
- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937 - update to 5.0.3
* Thu Apr 01 2021 Nathan Scott <nathans@redhat.com> - 6.2.1-1
- Upstream 6.2.1 release
- Merged make-macros spec change from Tom Stellard
* Tue Mar 02 2021 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl> - 6.2.0-2
- Rebuilt for updated systemd-rpm-macros
See https://pagure.io/fesco/issue/2583.
* Mon Mar 01 2021 Nathan Scott <nathans@redhat.com> - 6.2.0-1
- Upstream 6.2.0 release (RHBZ #1915463).
- drop patch merged upstream.
* Wed Feb 24 2021 Nathan Scott <nathans@redhat.com> - 6.0.11-1
- Upstream 6.0.11 release.
* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 6.0.10-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Wed Jan 13 2021 Remi Collet <remi@remirepo.net> - 6.0.10-1
- Upstream 6.0.10 release.
* Tue Nov 24 2020 Remi Collet <remi@remirepo.net> - 6.0.9-3
- fix check for regular file, not symlink
* Mon Nov 23 2020 Remi Collet <remi@remirepo.net> - 6.0.9-2
- move configuration in /etc/redis per upstream recommendation
see https://github.com/redis/redis/issues/8051
* Tue Oct 27 2020 Remi Collet <remi@remirepo.net> - 6.0.9-1
- Upstream 6.0.9 release.
* Tue Oct 20 2020 Remi Collet <remi@remirepo.net> - 6.0.8-2
- add missing LICENSE files in main package
* Thu Sep 10 2020 Remi Collet <remi@remirepo.net> - 6.0.8-1
- Upstream 6.0.8 release.
* Tue Sep 1 2020 Remi Collet <remi@remirepo.net> - 6.0.7-1
- Upstream 6.0.7 release.
- drop patch merged upstream
* Wed Jul 29 2020 Fedora Release Engineering <releng@fedoraproject.org> - 6.0.6-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
* Tue Jul 21 2020 Remi Collet <rcollet@redhat.com> - 6.0.6-1
- Upstream 6.0.6 release.
- drop patch merged upstream
- open https://github.com/redis/redis/pull/7543 fix deprecated tail syntax
* Wed Jun 10 2020 Nathan Scott <nathans@redhat.com> - 6.0.5-1
- Upstream 6.0.5 release.
* Thu May 28 2020 Remi Collet <remi@remirepo.net> - 6.0.4-3
- Add comment for TimeoutStartSec and TimeoutStopSec in limit.conf
- Fix missing notification to systemd for sentinel
patch from https://github.com/redis/redis/pull/7168
- Upstream 6.0.4 release.
* Mon May 18 2020 Nathan Scott <nathans@redhat.com> - 6.0.3-1
- Upstream 6.0.3 release.
* Wed May 6 2020 Remi Collet <rcollet@redhat.com> - 6.0.1-1
- Upstream 6.0.1 release.
* Fri May 01 2020 Nathan Scott <nathans@redhat.com> - 6.0.0-1
- Upstream 6.0.0 release.
* Fri Mar 13 2020 Nathan Scott <nathans@redhat.com> - 5.0.8-1
- Upstream 5.0.8 release.
* Wed Feb 12 2020 Nathan Scott <nathans@redhat.com> - 5.0.7-3
- Patch extern SDS_NOINIT definition for gcc 10 (RHBZ #1799969)
* Thu Jan 30 2020 Fedora Release Engineering <releng@fedoraproject.org> - 5.0.7-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Tue Nov 19 2019 Carl George <carl@george.computer> - 5.0.7-1
- Latest upstream
* Thu Sep 26 2019 Nathan Scott <nathans@redhat.com> - 5.0.6-1
- Upstream 5.0.6 release and redis-doc updates.
* Fri Jul 26 2019 Fedora Release Engineering <releng@fedoraproject.org> - 5.0.5-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
* Mon Jul 15 2019 Nathan Scott <nathans@redhat.com> - 5.0.5-2
- Use the (modified) bundled jemalloc for defrag (RHBZ #1725852)
* Thu May 16 2019 Nathan Scott <nathans@redhat.com> - 5.0.5-1
- Upstream 5.0.5 release and redis-doc updates.
* Sat May 11 2019 Nathan Scott <nathans@redhat.com> - 5.0.4-2
- Obsolete redis-trib - functionality now in redis-cli(1)
- Remove old chkconfig support, all systemd platforms now
* Tue Mar 19 2019 Nathan Scott <nathans@redhat.com> - 5.0.4-1
- Upstream 5.0.4 release and redis-doc updates.
- Fix sentinel.conf logfile line addition.
* Sat Feb 02 2019 Fedora Release Engineering <releng@fedoraproject.org> - 5.0.3-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
* Thu Dec 13 2018 Nathan Scott <nathans@redhat.com> - 5.0.3-1
- Upstream 5.0.3 release and redis-doc updates.
* Fri Nov 23 2018 Nathan Scott <nathans@redhat.com> - 5.0.2-1
- Upstream 5.0.2 release and redis-doc updates.
* Thu Nov 08 2018 Nathan Scott <nathans@redhat.com> - 5.0.1-1
- Upstream 5.0.1 release.
* Thu Oct 18 2018 Nathan Scott <nathans@redhat.com> - 5.0.0-1
- Update systemd service files for network-online requirement
- Upstream 5.0.0 release.
* Thu Aug 09 2018 Nathan Scott <nathans@redhat.com> - 4.0.11-1
- Drop the pandoc build dependency, install only markdown.
- Upstream 4.0.11 release.
* Sat Jul 14 2018 Fedora Release Engineering <releng@fedoraproject.org> - 4.0.10-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
* Mon Jun 25 2018 Remi Collet <rcollet@redhat.com> - 4.0.10-2 * Mon Jun 25 2018 Remi Collet <rcollet@redhat.com> - 4.0.10-2
- drop build dependency on pandoc
- drop dependency on jemalloc #1591762
- fix License (BSD and MIT) - fix License (BSD and MIT)
- add bundled libraries licences - add bundled libraries licences
- add information about bundled lzf - cleanup conditions from spec file
* Thu Jun 14 2018 Nathan Scott <nathans@redhat.com> - 4.0.10-1 * Thu Jun 14 2018 Nathan Scott <nathans@redhat.com> - 4.0.10-1
- Upstream 4.0.10 release. - Upstream 4.0.10 release.
* Mon May 21 2018 Joe Orton <jorton@redhat.com> - 4.0.9-1.2
- rebuild (#1571197)
* Tue Mar 27 2018 Nathan Scott <nathans@redhat.com> - 4.0.9-1 * Tue Mar 27 2018 Nathan Scott <nathans@redhat.com> - 4.0.9-1
- Upstream 4.0.9 release. - Upstream 4.0.9 release.
@ -540,11 +413,11 @@ fi
* Wed Sep 14 2016 Remi Collet <remi@fedoraproject.org> - 3.2.3-2 * Wed Sep 14 2016 Remi Collet <remi@fedoraproject.org> - 3.2.3-2
- add missing man pages #1374577 - add missing man pages #1374577
using patch from https://github.com/redis/redis/pull/3491 using patch from https://github.com/antirez/redis/pull/3491
- data and configuration should not be publicly readable #1374700 - data and configuration should not be publicly readable #1374700
- remove /var/run/redis with systemd #1374728 - remove /var/run/redis with systemd #1374728
- provide redis-check-rdb as a symlink to redis-server #1374736 - provide redis-check-rdb as a symlink to redis-server #1374736
using patch from https://github.com/redis/redis/pull/3494 using patch from https://github.com/antirez/redis/pull/3494
- move redis-shutdown to libexec - move redis-shutdown to libexec
* Thu Aug 4 2016 Haïkel Guémar <hguemar@fedoraproject.org> - 3.2.3-1 * Thu Aug 4 2016 Haïkel Guémar <hguemar@fedoraproject.org> - 3.2.3-1
