
Commit 8164c93

Merge branch 'unstable' into RELEASE_0_9
2 parents: 8262170 + d380074

34 files changed: +566 -181 lines

MANIFESTO

Lines changed: 0 additions & 67 deletions
This file was deleted.

README.md

Lines changed: 7 additions & 3 deletions
@@ -1,12 +1,14 @@
 What is KeyDB?
 --------------

-KeyDB is a high performance fork of Redis focussing on multithreading, memory efficiency, and high throughput. In addition to multithreading KeyDB also has features only available in Redis Enterprise such as FLASH storage support, and some not available at all such as direct backup to AWS S3.
+KeyDB is a high performance fork of Redis focusing on multithreading, memory efficiency, and high throughput. In addition to multithreading KeyDB also has features only available in Redis Enterprise such as FLASH storage support, and some not available at all such as direct backup to AWS S3.

 On the same hardware KeyDB can perform twice as many queries per second as Redis, with 60% lower latency.

 KeyDB has full compatibility with the Redis protocol, modules, and scripts. This includes full support for transactions, and atomic execution of scripts. For more information see our architecture section below.

+Try our docker container: https://hub.docker.com/r/eqalpha/keydb
+
 Why fork Redis?
 ---------------

@@ -39,7 +41,7 @@ If you would like to use the FLASH backed storage this option configures the dir

 db-s3-object /path/to/bucket

-If you would like KeyDB to dump directly to AWS S3 this option specifies the bucket. Using this option with the traditional RDB options will result in KeyDB backing up twice to both locations. This requires the AWS CLI tools to be installed and configured which are used under the hood to transfer the data.
+If you would like KeyDB to dump and load directly to AWS S3 this option specifies the bucket. Using this option with the traditional RDB options will result in KeyDB backing up twice to both locations. If both are specified KeyDB will first attempt to load from the local dump file and if that fails load from S3. This requires the AWS CLI tools to be installed and configured which are used under the hood to transfer the data.

 All other configuration options behave as you'd expect. Your existing configuration files should continue to work unchanged.

@@ -179,6 +181,8 @@ for Ubuntu and Debian systems:
 % cd utils
 % ./install_server.sh

+_Note_: `install_server.sh` will not work on Mac OSX; it is built for Linux only.
+
 The script will ask you a few questions and will setup everything you need
 to run KeyDB properly as a background daemon that will start again on
 system reboots.
@@ -189,7 +193,7 @@ You'll be able to stop and start KeyDB using the script named
 Multithreading Architecture
 ---------------------------

-KeyDB works by running the normal Redis event loop on multiple threads. Network IO, and query parsing are done concurrently. Each connection is assigned a thread on accept(). Access to the core hash table is guarded by spinlock. Because the hashtable access is extremely fast this lock has low contention. Transactions hold the lock for the duration of the EXEC command. Modules work in concert with the GIL which is only acquired when all server threads are paused. This maintains the atomicity gurantees modules expect.
+KeyDB works by running the normal Redis event loop on multiple threads. Network IO, and query parsing are done concurrently. Each connection is assigned a thread on accept(). Access to the core hash table is guarded by spinlock. Because the hashtable access is extremely fast this lock has low contention. Transactions hold the lock for the duration of the EXEC command. Modules work in concert with the GIL which is only acquired when all server threads are paused. This maintains the atomicity guarantees modules expect.

 Unlike most databases the core data structure is the fastest part of the system. Most of the query time comes from parsing the REPL protocol and copying data to/from the network.
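The Multithreading Architecture paragraph above carries the fork's central design claim: parsing and network IO run concurrently on every thread, and only the brief core-hashtable access is serialized. A minimal, self-contained C sketch of that pattern follows; nothing in it is KeyDB source, and every identifier is illustrative. It compiles with `cc -pthread` on Linux:

    /* Sketch of the locking pattern the README describes: each thread
     * does its own "parsing" work concurrently and takes the spinlock
     * only for the brief shared-table access. Illustrative only. */
    #include <pthread.h>
    #include <stdio.h>

    #define THREADS 4
    #define OPS_PER_THREAD 100000

    static pthread_spinlock_t table_lock;
    static long table_entries;  /* stand-in for the core hash table */

    static void *worker(void *arg) {
        (void)arg;
        for (int i = 0; i < OPS_PER_THREAD; i++) {
            /* Network IO and protocol parsing would happen here,
             * outside the lock, concurrently on every thread. */
            pthread_spin_lock(&table_lock);
            table_entries++;    /* fast guarded hash-table access */
            pthread_spin_unlock(&table_lock);
        }
        return NULL;
    }

    int main(void) {
        pthread_t tids[THREADS];
        pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
        for (int i = 0; i < THREADS; i++)
            pthread_create(&tids[i], NULL, worker, NULL);
        for (int i = 0; i < THREADS; i++)
            pthread_join(tids[i], NULL);
        printf("%ld entries\n", table_entries); /* THREADS*OPS_PER_THREAD */
        return 0;
    }

The README's low-contention argument rests on the guarded section being tiny relative to the parsing and IO outside it; EXEC transactions and module calls are the exceptions that hold the lock (or the GIL) for longer.
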
src/Makefile

Lines changed: 3 additions & 0 deletions
@@ -343,3 +343,6 @@ install: all
 	$(REDIS_INSTALL) $(REDIS_CHECK_RDB_NAME) $(INSTALL_BIN)
 	$(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN)
 	@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME)
+
+uninstall:
+	rm -f $(INSTALL_BIN)/{$(REDIS_SERVER_NAME),$(REDIS_BENCHMARK_NAME),$(REDIS_CLI_NAME),$(REDIS_CHECK_RDB_NAME),$(REDIS_CHECK_AOF_NAME),$(REDIS_SENTINEL_NAME)}
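
With this target in place, running `make uninstall` from `src/` removes the installed binaries, including the `keydb-sentinel` symlink created by the `install` target, from `$(INSTALL_BIN)`.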

src/acl.c

Lines changed: 7 additions & 0 deletions
@@ -1389,6 +1389,8 @@ void ACLLoadUsersAtStartup(void) {
  * ACL SETUSER <username> ... acl rules ...
  * ACL DELUSER <username> [...]
  * ACL GETUSER <username>
+ * ACL GENPASS
+ * ACL WHOAMI
  */
 void aclCommand(client *c) {
     char *sub = ptrFromObj(c->argv[1]);
@@ -1571,6 +1573,10 @@ void aclCommand(client *c) {
         }
         dictReleaseIterator(di);
         setDeferredArrayLen(c,dl,arraylen);
+    } else if (!strcasecmp(sub,"genpass") && c->argc == 2) {
+        char pass[32]; /* 128 bits of actual pseudo random data. */
+        getRandomHexChars(pass,sizeof(pass));
+        addReplyBulkCBuffer(c,pass,sizeof(pass));
     } else if (!strcasecmp(sub,"help")) {
         const char *help[] = {
 "LOAD -- Reload users from the ACL file.",
@@ -1581,6 +1587,7 @@ void aclCommand(client *c) {
 "DELUSER <username> [...] -- Delete a list of users.",
 "CAT -- List available categories.",
 "CAT <category> -- List commands inside category.",
+"GENPASS -- Generate a secure user password.",
 "WHOAMI -- Return the current connection username.",
 NULL
         };
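
The new GENPASS subcommand replies with 32 hex characters, i.e. 128 bits of randomness, as the `char pass[32]` comment above notes. A standalone C sketch of the same shape of output — this is not the server code; it reads `/dev/urandom` where the server uses its `getRandomHexChars()` helper:

    /* Hex-encode 16 random bytes -> 32 hex chars = 128 bits, the same
     * shape as an ACL GENPASS reply. Illustrative only. */
    #include <stdio.h>

    int main(void) {
        unsigned char raw[16];  /* 16 bytes = 128 bits */
        char pass[33];
        FILE *f = fopen("/dev/urandom", "rb");
        if (!f) return 1;
        size_t n = fread(raw, 1, sizeof(raw), f);
        fclose(f);
        if (n != sizeof(raw)) return 1;
        for (int i = 0; i < 16; i++)
            snprintf(pass + i * 2, 3, "%02x", raw[i]);
        printf("%s\n", pass);   /* 32 hex characters */
        return 0;
    }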

src/aof.c

Lines changed: 3 additions & 0 deletions
@@ -1621,6 +1621,9 @@ void aofRemoveTempFile(pid_t childpid) {

     snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) childpid);
     unlink(tmpfile);
+
+    snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) childpid);
+    unlink(tmpfile);
 }

 /* Update the server.aof_current_size field explicitly using stat(2)

src/blocked.c

Lines changed: 11 additions & 4 deletions
@@ -77,18 +77,25 @@ int serveClientBlockedOnList(client *receiver, robj *key, robj *dstkey, redisDb
  * is zero. */
 int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit) {
     long long tval;
+    long double ftval;

-    if (getLongLongFromObjectOrReply(c,object,&tval,
-        "timeout is not an integer or out of range") != C_OK)
-        return C_ERR;
+    if (unit == UNIT_SECONDS) {
+        if (getLongDoubleFromObjectOrReply(c,object,&ftval,
+            "timeout is not an float or out of range") != C_OK)
+            return C_ERR;
+        tval = (long long) (ftval * 1000.0);
+    } else {
+        if (getLongLongFromObjectOrReply(c,object,&tval,
+            "timeout is not an integer or out of range") != C_OK)
+            return C_ERR;
+    }

     if (tval < 0) {
         addReplyError(c,"timeout is negative");
         return C_ERR;
     }

     if (tval > 0) {
-        if (unit == UNIT_SECONDS) tval *= 1000;
         tval += mstime();
     }
     *timeout = tval;
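
The practical effect of this hunk: commands that take a seconds timeout (BLPOP and friends) now accept fractional values, parsed as a long double and converted to milliseconds up front, which is why the old `tval *= 1000` at the bottom is deleted. A standalone sketch of just the conversion — illustrative, not the server code:

    /* Fractional seconds -> milliseconds, as in the new UNIT_SECONDS
     * path above. Illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        const char *arg = "0.1";                      /* e.g. BLPOP k 0.1 */
        long double ftval = strtold(arg, NULL);
        long long tval = (long long)(ftval * 1000.0); /* -> 100 ms */
        printf("%s s = %lld ms\n", arg, tval);
        return 0;
    }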

src/cluster.c

Lines changed: 1 addition & 0 deletions
@@ -3031,6 +3031,7 @@ void clusterHandleSlaveFailover(void) {
         if (server.cluster->mf_end) {
             server.cluster->failover_auth_time = mstime();
             server.cluster->failover_auth_rank = 0;
+            clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER);
         }
         serverLog(LL_WARNING,
             "Start of election delayed for %lld milliseconds "

src/config.c

Lines changed: 2 additions & 2 deletions
@@ -1103,8 +1103,8 @@ void configSetCommand(client *c) {
             int soft_seconds;

             class = getClientTypeByName(v[j]);
-            hard = strtoll(v[j+1],NULL,10);
-            soft = strtoll(v[j+2],NULL,10);
+            hard = memtoll(v[j+1],NULL);
+            soft = memtoll(v[j+2],NULL);
             soft_seconds = strtoll(v[j+3],NULL,10);

             server.client_obuf_limits[class].hard_limit_bytes = hard;
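
Swapping `strtoll` for `memtoll` lets the hard and soft limits in `CONFIG SET client-output-buffer-limit` carry memory-unit suffixes such as `64mb`, matching how the config-file path already parses these fields. A rough standalone sketch of suffix-aware parsing — not Redis' actual `memtoll()`, which also handles the 1000-based `k`/`m`/`g` forms:

    /* "64mb" -> 67108864; bare numbers pass through. Illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <strings.h>

    static long long parse_size(const char *p) {
        char *end;
        long long v = strtoll(p, &end, 10);
        if (!strcasecmp(end, "kb"))      v *= 1024LL;
        else if (!strcasecmp(end, "mb")) v *= 1024LL * 1024;
        else if (!strcasecmp(end, "gb")) v *= 1024LL * 1024 * 1024;
        return v;
    }

    int main(void) {
        printf("%lld\n", parse_size("64mb")); /* 67108864 */
        printf("%lld\n", parse_size("1024")); /* 1024 */
        return 0;
    }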

src/debug.c

Lines changed: 1 addition & 1 deletion
@@ -362,7 +362,7 @@ NULL
     }
     emptyDb(-1,EMPTYDB_NO_FLAGS,NULL);
     protectClient(c);
-    int ret = rdbLoad(server.rdb_filename,NULL);
+    int ret = rdbLoad(NULL);
     unprotectClient(c);
     if (ret != C_OK) {
         addReplyError(c,"Error trying to load the RDB dump");

src/geo.c

Lines changed: 1 addition & 1 deletion
@@ -659,7 +659,7 @@ void georadiusGeneric(client *c, int flags) {
         zsetConvertToZiplistIfNeeded(zobj,maxelelen);
         setKey(c->db,storekey,zobj);
         decrRefCount(zobj);
-        notifyKeyspaceEvent(NOTIFY_LIST,"georadiusstore",storekey,
+        notifyKeyspaceEvent(NOTIFY_ZSET,"georadiusstore",storekey,
             c->db->id);
         server.dirty += returned_items;
     } else if (dbDelete(c->db,storekey)) {
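
A one-word fix with a real behavioral effect: GEORADIUS with STORE writes a sorted set, so the keyspace notification it fires now uses the zset event class ('z' under `notify-keyspace-events`) instead of the list class ('l'), and subscribers filtering by event class will see it in the right place.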

src/hyperloglog.c

Lines changed: 8 additions & 1 deletion
@@ -614,6 +614,7 @@ int hllSparseToDense(robj *o) {
         } else {
             runlen = HLL_SPARSE_VAL_LEN(p);
             regval = HLL_SPARSE_VAL_VALUE(p);
+            if ((runlen + idx) > HLL_REGISTERS) break; /* Overflow. */
             while(runlen--) {
                 HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval);
                 idx++;
@@ -1013,7 +1014,12 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) {
     double m = HLL_REGISTERS;
     double E;
     int j;
-    int reghisto[HLL_Q+2] = {0};
+    /* Note that reghisto size could be just HLL_Q+2, becuase HLL_Q+1 is
+     * the maximum frequency of the "000...1" sequence the hash function is
+     * able to return. However it is slow to check for sanity of the
+     * input: instead we history array at a safe size: overflows will
+     * just write data to wrong, but correctly allocated, places. */
+    int reghisto[64] = {0};

     /* Compute register histogram */
     if (hdr->encoding == HLL_DENSE) {
@@ -1088,6 +1094,7 @@ int hllMerge(uint8_t *max, size_t cmax, robj *hll) {
         } else {
             runlen = HLL_SPARSE_VAL_LEN(p);
             regval = HLL_SPARSE_VAL_VALUE(p);
+            if ((runlen + i) > HLL_REGISTERS) break; /* Overflow. */
             while(runlen--) {
                 if (i < 0 || (size_t)i >= cmax)
                     return C_ERR;
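
All three hunks harden HyperLogLog against corrupted sparse encodings: a bogus VAL run length can no longer walk `idx`/`i` past `HLL_REGISTERS`, and the register histogram gets a fixed safe size instead of trusting the input. The bound reasoning in the new comment checks out for Redis' standard parameter HLL_P = 14, as this standalone sketch shows:

    /* HLL_Q = 64 - HLL_P = 50, so valid histogram indexes run
     * 0..HLL_Q+1 = 51, comfortably inside reghisto[64]. */
    #include <stdio.h>

    #define HLL_P 14
    #define HLL_Q (64 - HLL_P)
    #define HLL_REGISTERS (1 << HLL_P)

    int main(void) {
        printf("HLL_REGISTERS = %d\n", HLL_REGISTERS);   /* 16384 */
        printf("max histogram index = %d\n", HLL_Q + 1); /* 51 < 64 */
        return 0;
    }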
