Merge pull request ceph#2046 from ceph/wip-8670
mon: OSDMonitor: 'osd pool' - if we can set it, we must be able to get it

Reviewed-by: Loic Dachary <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
Sage Weil committed Jul 3, 2014
2 parents 149a305 + ddc04c8 commit 55e297a
Showing 4 changed files with 48 additions and 2 deletions.
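The upshot: every cache-tiering tunable that 'osd pool set' accepts becomes readable through 'osd pool get' as well (target_max_objects, target_max_bytes, cache_target_dirty_ratio, cache_target_full_ratio, cache_min_flush_age, cache_min_evict_age). A minimal sketch of the round trip this enables, using the rbd pool and values from the test below; the plain-text output lines follow the new OSDMonitor.cc branch:

    ceph osd pool set rbd target_max_bytes 123456
    ceph osd pool get rbd target_max_bytes
    # target_max_bytes: 123456
    ceph osd pool set rbd cache_min_flush_age 123
    ceph osd pool get rbd cache_min_flush_age
    # cache_min_flush_age: 123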
doc/dev/cache-pool.rst (2 changes: 1 addition & 1 deletion)
@@ -161,7 +161,7 @@ evicting clean objects when we reach 80% of the target size.
The target size can be specified either in terms of objects or bytes::

ceph osd pool set foo-hot target_max_bytes 1000000000000 # 1 TB
ceph osd pool set foo-hot target_max_objets 1000000 # 1 million objects
ceph osd pool set foo-hot target_max_objects 1000000 # 1 million objects

Note that if both limits are specified, Ceph will begin flushing or
evicting when either threshold is triggered.
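With this change the configured limits can also be read back, which helps confirm which thresholds are in effect when both are set. A hedged sketch against the same foo-hot pool as above (output format per the new plain-text branch in OSDMonitor.cc):

    ceph osd pool get foo-hot target_max_bytes
    # target_max_bytes: 1000000000000
    ceph osd pool get foo-hot target_max_objects
    # target_max_objects: 1000000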
qa/workunits/cephtool/test.sh (13 changes: 13 additions & 0 deletions)
@@ -679,17 +679,30 @@ function test_mon_osd_pool_set()
  ceph osd pool get rbd hit_set_fpp | grep "hit_set_fpp: 0.01"

  ceph osd pool set rbd target_max_objects 123
  ceph osd pool get rbd target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
  ceph osd pool set rbd target_max_bytes 123456
  ceph osd pool get rbd target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'
  ceph osd pool set rbd cache_target_dirty_ratio .123
  ceph osd pool get rbd cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set rbd cache_target_dirty_ratio -.2
  expect_false ceph osd pool set rbd cache_target_dirty_ratio 1.1
  ceph osd pool set rbd cache_target_full_ratio .123
  ceph osd pool get rbd cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
  ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
  ceph osd pool set rbd cache_target_full_ratio 1.0
  ceph osd pool set rbd cache_target_full_ratio 0
  expect_false ceph osd pool set rbd cache_target_full_ratio 1.1
  ceph osd pool set rbd cache_min_flush_age 123
  ceph osd pool get rbd cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
  ceph osd pool set rbd cache_min_evict_age 234
  ceph osd pool get rbd cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'


  ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
}
src/mon/MonCommands.h (2 changes: 1 addition & 1 deletion)
@@ -571,7 +571,7 @@ COMMAND("osd pool rename " \
"rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
COMMAND("osd pool get " \
"name=pool,type=CephPoolname " \
"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid", \
"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age", \
"get pool parameter <var>", "osd", "r", "cli,rest")
COMMAND("osd pool set " \
"name=pool,type=CephPoolname " \
src/mon/OSDMonitor.cc (33 changes: 33 additions & 0 deletions)
@@ -2505,6 +2505,24 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
        BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
        f->dump_float("hit_set_fpp", bloomp->get_fpp());
      }
    } else if (var == "target_max_objects") {
      f->dump_unsigned("target_max_objects", p->target_max_objects);
    } else if (var == "target_max_bytes") {
      f->dump_unsigned("target_max_bytes", p->target_max_bytes);
    } else if (var == "cache_target_dirty_ratio") {
      f->dump_unsigned("cache_target_dirty_ratio_micro",
                       p->cache_target_dirty_ratio_micro);
      f->dump_float("cache_target_dirty_ratio",
                    ((float)p->cache_target_dirty_ratio_micro/1000000));
    } else if (var == "cache_target_full_ratio") {
      f->dump_unsigned("cache_target_full_ratio_micro",
                       p->cache_target_full_ratio_micro);
      f->dump_float("cache_target_full_ratio",
                    ((float)p->cache_target_full_ratio_micro/1000000));
    } else if (var == "cache_min_flush_age") {
      f->dump_unsigned("cache_min_flush_age", p->cache_min_flush_age);
    } else if (var == "cache_min_evict_age") {
      f->dump_unsigned("cache_min_evict_age", p->cache_min_evict_age);
    }

    f->close_section();
@@ -2538,7 +2556,22 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
      }
      BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
      ss << "hit_set_fpp: " << bloomp->get_fpp();
    } else if (var == "target_max_objects") {
      ss << "target_max_objects: " << p->target_max_objects;
    } else if (var == "target_max_bytes") {
      ss << "target_max_bytes: " << p->target_max_bytes;
    } else if (var == "cache_target_dirty_ratio") {
      ss << "cache_target_dirty_ratio: "
         << ((float)p->cache_target_dirty_ratio_micro/1000000);
    } else if (var == "cache_target_full_ratio") {
      ss << "cache_target_full_ratio: "
         << ((float)p->cache_target_full_ratio_micro/1000000);
    } else if (var == "cache_min_flush_age") {
      ss << "cache_min_flush_age: " << p->cache_min_flush_age;
    } else if (var == "cache_min_evict_age") {
      ss << "cache_min_evict_age: " << p->cache_min_evict_age;
    }

    rdata.append(ss);
    ss.str("");
  }
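Worth noting: the two ratio settings are stored internally as an integer number of millionths (the _micro fields), which is why the formatter branch above dumps both the raw micro value and the derived float. A sketch of how that surfaces on the command line, mirroring the checks added to test.sh:

    ceph osd pool set rbd cache_target_full_ratio .123
    ceph osd pool get rbd cache_target_full_ratio
    # cache_target_full_ratio: 0.123
    ceph osd dump -f json-pretty | grep cache_target_full_ratio_micro
    # "cache_target_full_ratio_micro": 123000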