Skip to content

Commit

Permalink
lru_crawler_tocrawl to limit inspected per run
Browse files Browse the repository at this point in the history
Since we start from the tail, and some slabs could be many millions of items,
it doesn't always make sense to walk the entire thing looking for data to use.

This could be a lot smarter: for instance, specify a percentage and it'll only
crawl that percentage of used_chunks for that slab.
  • Loading branch information
dormando committed Apr 17, 2014
1 parent b3bbd3b commit e31a591
Show file tree
Hide file tree
Showing 5 changed files with 45 additions and 5 deletions.
17 changes: 15 additions & 2 deletions doc/protocol.txt
Original file line number Diff line number Diff line change
Expand Up @@ -433,7 +433,19 @@ The response line could be one of:

- "OK"

- "CLIENT_ERROR [message]" indicating a formato or bounds issue.
- "CLIENT_ERROR [message]" indicating a format or bounds issue.

lru_crawler tocrawl <32u>

- The maximum number of items to inspect in a slab class per run request. This
  allows you to avoid scanning all of a very large slab when it is unlikely to
  find items to expire.

The response line could be one of:

- "OK"

- "CLIENT_ERROR [message]" indicating a format or bounds issue.

lru_crawler crawl <classid,classid,classid|all>

Expand Down Expand Up @@ -614,7 +626,8 @@ other stats command.
| slab_automove | bool | Whether slab page automover is enabled |
| hash_algorithm | char | Hash table algorithm in use |
| lru_crawler | bool | Whether the LRU crawler is enabled |
| lru_Crawler_sleep | 32 | Microseconds to sleep between LRU crawls |
| lru_crawler_sleep | 32 | Microseconds to sleep between LRU crawls |
| lru_crawler_tocrawl| 32u | Max items to crawl per slab per run |
|-------------------+----------+----------------------------------------------|


Expand Down
4 changes: 3 additions & 1 deletion items.c
Original file line number Diff line number Diff line change
Expand Up @@ -789,7 +789,8 @@ static void *item_crawler_thread(void *arg) {
}
pthread_mutex_lock(&cache_lock);
search = crawler_crawl_q((item *)&crawlers[i]);
if (search == NULL) {
if (search == NULL ||
(crawlers[i].remaining && --crawlers[i].remaining < 1)) {
if (settings.verbose > 2)
fprintf(stderr, "Nothing left to crawl for %d\n", i);
crawlers[i].it_flags = 0;
Expand Down Expand Up @@ -915,6 +916,7 @@ enum crawler_result_type lru_crawler_crawl(char *slabs) {
crawlers[sid].next = 0;
crawlers[sid].prev = 0;
crawlers[sid].time = 0;
crawlers[sid].remaining = settings.lru_crawler_tocrawl;
crawlers[sid].slabs_clsid = sid;
crawler_link_q((item *)&crawlers[sid]);
crawler_count++;
Expand Down
25 changes: 24 additions & 1 deletion memcached.c
Original file line number Diff line number Diff line change
Expand Up @@ -232,6 +232,7 @@ static void settings_init(void) {
settings.maxconns_fast = false;
settings.lru_crawler = false;
settings.lru_crawler_sleep = 100;
settings.lru_crawler_tocrawl = 0;
settings.hashpower_init = 0;
settings.slab_reassign = false;
settings.slab_automove = 0;
Expand Down Expand Up @@ -2657,6 +2658,7 @@ static void process_stat_settings(ADD_STAT add_stats, void *c) {
APPEND_STAT("slab_automove", "%d", settings.slab_automove);
APPEND_STAT("lru_crawler", "%s", settings.lru_crawler ? "yes" : "no");
APPEND_STAT("lru_crawler_sleep", "%d", settings.lru_crawler_sleep);
APPEND_STAT("lru_crawler_tocrawl", "%lu", (unsigned long)settings.lru_crawler_tocrawl);
APPEND_STAT("tail_repair_time", "%d", settings.tail_repair_time);
APPEND_STAT("flush_enabled", "%s", settings.flush_enabled ? "yes" : "no");
APPEND_STAT("hash_algorithm", "%s", settings.hash_algorithm);
Expand Down Expand Up @@ -3584,6 +3586,15 @@ static void process_command(conn *c, char *command) {
break;
}
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "tocrawl") == 0) {
uint32_t tocrawl;
if (!safe_strtoul(tokens[2].value, &tocrawl)) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
settings.lru_crawler_tocrawl = tocrawl;
out_string(c, "OK");
return;
} else if (ntokens == 4 && strcmp(tokens[COMMAND_TOKEN + 1].value, "sleep") == 0) {
uint32_t tosleep;
if (!safe_strtoul(tokens[2].value, &tosleep)) {
Expand Down Expand Up @@ -4793,6 +4804,8 @@ static void usage(void) {
" - lru_crawler: Enable LRU Crawler background thread\n"
" - lru_crawler_sleep: Microseconds to sleep between items\n"
" default is 100.\n"
" - lru_crawler_tocrawl: Max items to crawl per slab per run\n"
" default is 0 (unlimited)\n"
);
return;
}
Expand Down Expand Up @@ -5022,6 +5035,7 @@ int main (int argc, char **argv) {
bool tcp_specified = false;
bool udp_specified = false;
enum hashfunc_type hash_type = JENKINS_HASH;
uint32_t tocrawl;

char *subopts;
char *subopts_value;
Expand All @@ -5033,7 +5047,8 @@ int main (int argc, char **argv) {
TAIL_REPAIR_TIME,
HASH_ALGORITHM,
LRU_CRAWLER,
LRU_CRAWLER_SLEEP
LRU_CRAWLER_SLEEP,
LRU_CRAWLER_TOCRAWL
};
char *const subopts_tokens[] = {
[MAXCONNS_FAST] = "maxconns_fast",
Expand All @@ -5044,6 +5059,7 @@ int main (int argc, char **argv) {
[HASH_ALGORITHM] = "hash_algorithm",
[LRU_CRAWLER] = "lru_crawler",
[LRU_CRAWLER_SLEEP] = "lru_crawler_sleep",
[LRU_CRAWLER_TOCRAWL] = "lru_crawler_tocrawl",
NULL
};

Expand Down Expand Up @@ -5357,6 +5373,13 @@ int main (int argc, char **argv) {
return 1;
}
break;
case LRU_CRAWLER_TOCRAWL:
if (!safe_strtoul(subopts_value, &tocrawl)) {
fprintf(stderr, "lru_crawler_tocrawl takes a numeric 32bit value\n");
return 1;
}
settings.lru_crawler_tocrawl = tocrawl;
break;
default:
printf("Illegal suboption \"%s\"\n", subopts_value);
return 1;
Expand Down
2 changes: 2 additions & 0 deletions memcached.h
Original file line number Diff line number Diff line change
Expand Up @@ -316,6 +316,7 @@ struct settings {
bool flush_enabled; /* flush_all enabled */
char *hash_algorithm; /* Hash algorithm in use */
int lru_crawler_sleep; /* Microsecond sleep between items */
uint32_t lru_crawler_tocrawl; /* Number of items to crawl per run */
};

extern struct stats stats;
Expand Down Expand Up @@ -369,6 +370,7 @@ typedef struct {
uint8_t it_flags; /* ITEM_* above */
uint8_t slabs_clsid;/* which slab class we're in */
uint8_t nkey; /* key length, w/terminating null and padding */
uint32_t remaining; /* Max keys to crawl per slab per invocation */
} crawler;

typedef struct {
Expand Down
2 changes: 1 addition & 1 deletion t/binary.t
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

use strict;
use warnings;
use Test::More tests => 3600;
use Test::More tests => 3603;
use FindBin qw($Bin);
use lib "$Bin/lib";
use MemcachedTest;
Expand Down

0 comments on commit e31a591

Please sign in to comment.