Skip to content

Commit

Permalink
mon: OSDMonitor: 'osd pool' - if we can set it, we must be able to get it
Browse files Browse the repository at this point in the history

Add support to get the values for the following variables:
 - target_max_objects
 - target_max_bytes
 - cache_target_dirty_ratio
 - cache_target_full_ratio
 - cache_min_flush_age
 - cache_min_evict_age

Signed-off-by: Joao Eduardo Luis <[email protected]>
  • Loading branch information
Joao Eduardo Luis committed Jul 2, 2014
1 parent 12079a7 commit ddc04c8
Show file tree
Hide file tree
Showing 3 changed files with 47 additions and 1 deletion.
13 changes: 13 additions & 0 deletions qa/workunits/cephtool/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -644,17 +644,30 @@ function test_mon_osd_pool_set()
ceph osd pool get rbd hit_set_fpp | grep "hit_set_fpp: 0.01"

ceph osd pool set rbd target_max_objects 123
ceph osd pool get rbd target_max_objects | \
grep 'target_max_objects:[ \t]\+123'
ceph osd pool set rbd target_max_bytes 123456
ceph osd pool get rbd target_max_bytes | \
grep 'target_max_bytes:[ \t]\+123456'
ceph osd pool set rbd cache_target_dirty_ratio .123
ceph osd pool get rbd cache_target_dirty_ratio | \
grep 'cache_target_dirty_ratio:[ \t]\+0.123'
expect_false ceph osd pool set rbd cache_target_dirty_ratio -.2
expect_false ceph osd pool set rbd cache_target_dirty_ratio 1.1
ceph osd pool set rbd cache_target_full_ratio .123
ceph osd pool get rbd cache_target_full_ratio | \
grep 'cache_target_full_ratio:[ \t]\+0.123'
ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
ceph osd pool set rbd cache_target_full_ratio 1.0
ceph osd pool set rbd cache_target_full_ratio 0
expect_false ceph osd pool set rbd cache_target_full_ratio 1.1
ceph osd pool set rbd cache_min_flush_age 123
ceph osd pool get rbd cache_min_flush_age | \
grep 'cache_min_flush_age:[ \t]\+123'
ceph osd pool set rbd cache_min_evict_age 234
ceph osd pool get rbd cache_min_evict_age | \
grep 'cache_min_evict_age:[ \t]\+234'


ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
}
Expand Down
2 changes: 1 addition & 1 deletion src/mon/MonCommands.h
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,7 @@ COMMAND("osd pool rename " \
"rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
COMMAND("osd pool get " \
"name=pool,type=CephPoolname " \
"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid", \
"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age", \
"get pool parameter <var>", "osd", "r", "cli,rest")
COMMAND("osd pool set " \
"name=pool,type=CephPoolname " \
Expand Down
33 changes: 33 additions & 0 deletions src/mon/OSDMonitor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2505,6 +2505,24 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
f->dump_float("hit_set_fpp", bloomp->get_fpp());
}
} else if (var == "target_max_objects") {
f->dump_unsigned("target_max_objects", p->target_max_objects);
} else if (var == "target_max_bytes") {
f->dump_unsigned("target_max_bytes", p->target_max_bytes);
} else if (var == "cache_target_dirty_ratio") {
f->dump_unsigned("cache_target_dirty_ratio_micro",
p->cache_target_dirty_ratio_micro);
f->dump_float("cache_target_dirty_ratio",
((float)p->cache_target_dirty_ratio_micro/1000000));
} else if (var == "cache_target_full_ratio") {
f->dump_unsigned("cache_target_full_ratio_micro",
p->cache_target_full_ratio_micro);
f->dump_float("cache_target_full_ratio",
((float)p->cache_target_full_ratio_micro/1000000));
} else if (var == "cache_min_flush_age") {
f->dump_unsigned("cache_min_flush_age", p->cache_min_flush_age);
} else if (var == "cache_min_evict_age") {
f->dump_unsigned("cache_min_evict_age", p->cache_min_evict_age);
}

f->close_section();
Expand Down Expand Up @@ -2538,7 +2556,22 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
}
BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
ss << "hit_set_fpp: " << bloomp->get_fpp();
} else if (var == "target_max_objects") {
ss << "target_max_objects: " << p->target_max_objects;
} else if (var == "target_max_bytes") {
ss << "target_max_bytes: " << p->target_max_bytes;
} else if (var == "cache_target_dirty_ratio") {
ss << "cache_target_dirty_ratio: "
<< ((float)p->cache_target_dirty_ratio_micro/1000000);
} else if (var == "cache_target_full_ratio") {
ss << "cache_target_full_ratio: "
<< ((float)p->cache_target_full_ratio_micro/1000000);
} else if (var == "cache_min_flush_age") {
ss << "cache_min_flush_age: " << p->cache_min_flush_age;
} else if (var == "cache_min_evict_age") {
ss << "cache_min_evict_age: " << p->cache_min_evict_age;
}

rdata.append(ss);
ss.str("");
}
Expand Down

0 comments on commit ddc04c8

Please sign in to comment.