// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDCACHE_H
#define CEPH_MDCACHE_H
#include <string_view>
#include "include/types.h"
#include "include/filepath.h"
#include "include/elist.h"
#include "messages/MCacheExpire.h"
#include "messages/MClientQuota.h"
#include "messages/MClientRequest.h"
#include "messages/MClientSnap.h"
#include "messages/MDentryLink.h"
#include "messages/MDentryUnlink.h"
#include "messages/MDirUpdate.h"
#include "messages/MDiscover.h"
#include "messages/MDiscoverReply.h"
#include "messages/MGatherCaps.h"
#include "messages/MGenericMessage.h"
#include "messages/MInodeFileCaps.h"
#include "messages/MLock.h"
#include "messages/MMDSCacheRejoin.h"
#include "messages/MMDSFindIno.h"
#include "messages/MMDSFindInoReply.h"
#include "messages/MMDSFragmentNotify.h"
#include "messages/MMDSFragmentNotifyAck.h"
#include "messages/MMDSOpenIno.h"
#include "messages/MMDSOpenInoReply.h"
#include "messages/MMDSResolve.h"
#include "messages/MMDSResolveAck.h"
#include "messages/MMDSSlaveRequest.h"
#include "messages/MMDSSnapUpdate.h"
#include "osdc/Filer.h"
#include "CInode.h"
#include "CDentry.h"
#include "CDir.h"
#include "include/Context.h"
#include "events/EMetaBlob.h"
#include "RecoveryQueue.h"
#include "StrayManager.h"
#include "OpenFileTable.h"
#include "MDSContext.h"
#include "MDSMap.h"
#include "Mutation.h"
class PerfCounters;
class MDSRank;
class Session;
class Migrator;
class ESubtreeMap;
enum {
l_mdc_first = 3000,
// How many inodes currently in stray dentries
l_mdc_num_strays,
// How many stray dentries are currently delayed for purge due to refs
l_mdc_num_strays_delayed,
// How many stray dentries are currently being enqueued for purge
l_mdc_num_strays_enqueuing,
// How many dentries have ever been added to stray dir
l_mdc_strays_created,
// How many dentries have been passed on to PurgeQueue
l_mdc_strays_enqueued,
// How many strays have been reintegrated?
l_mdc_strays_reintegrated,
// How many strays have been migrated?
l_mdc_strays_migrated,
// How many inode sizes currently being recovered
l_mdc_num_recovering_processing,
// How many inodes currently waiting to have size recovered
l_mdc_num_recovering_enqueued,
// How many inodes waiting with elevated priority for recovery
l_mdc_num_recovering_prioritized,
// How many inodes ever started size recovery
l_mdc_recovery_started,
// How many inodes ever completed size recovery
l_mdc_recovery_completed,
l_mdss_ireq_enqueue_scrub,
l_mdss_ireq_exportdir,
l_mdss_ireq_flush,
l_mdss_ireq_fragmentdir,
l_mdss_ireq_fragstats,
l_mdss_ireq_inodestats,
l_mdc_last,
};
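// Illustrative sketch (hypothetical; the real registration lives in
// register_perfcounters()): counters in this enum are typically wired
// up with a PerfCountersBuilder spanning [l_mdc_first, l_mdc_last), e.g.
//
//   PerfCountersBuilder pcb(g_ceph_context, "mds_cache",
//                           l_mdc_first, l_mdc_last);
//   pcb.add_u64(l_mdc_num_strays, "num_strays");  // one counter of many
//   logger.reset(pcb.create_perf_counters());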
// flags for predirty_journal_parents()
static const int PREDIRTY_PRIMARY = 1; // primary dn, adjust nested accounting
static const int PREDIRTY_DIR = 2; // update parent dir mtime/size
static const int PREDIRTY_SHALLOW = 4; // only go to immediate parent (for easier rollback)
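// Illustrative combination (hypothetical caller; see the
// predirty_journal_parents() declaration below): journaling a new
// primary link would typically pass
//
//   predirty_journal_parents(mut, &blob, in, dir,
//                            PREDIRTY_PRIMARY|PREDIRTY_DIR, 1);
//
// i.e. adjust nested accounting along the primary dentry chain and
// update the parent dir's mtime/size, with linkunlink=1 for the added link.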
class MDCache {
public:
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
typedef std::map<mds_rank_t, MCacheExpire::ref> expiremap;
// the MDSRank that owns this cache
MDSRank *mds;
// -- my cache --
LRU lru; // dentry lru for expiring items from cache
LRU bottom_lru; // dentries that should be trimmed ASAP
protected:
ceph::unordered_map<inodeno_t,CInode*> inode_map; // map of head inodes by ino
map<vinodeno_t, CInode*> snap_inode_map; // map of snap inodes by ino
CInode *root; // root inode
CInode *myin; // .ceph/mds%d dir
bool readonly;
void set_readonly() { readonly = true; }
CInode *strays[NUM_STRAY]; // my stray dir
int stray_index;
CInode *get_stray() {
return strays[stray_index];
}
set<CInode*> base_inodes;
std::unique_ptr<PerfCounters> logger;
Filer filer;
bool exceeded_size_limit;
private:
uint64_t cache_inode_limit;
uint64_t cache_memory_limit;
double cache_reservation;
double cache_health_threshold;
public:
uint64_t cache_limit_inodes(void) {
return cache_inode_limit;
}
uint64_t cache_limit_memory(void) {
return cache_memory_limit;
}
double cache_toofull_ratio(void) const {
double inode_reserve = cache_inode_limit*(1.0-cache_reservation);
double memory_reserve = cache_memory_limit*(1.0-cache_reservation);
return fmax(0.0, fmax((cache_size()-memory_reserve)/memory_reserve,
cache_inode_limit == 0 ? 0.0 :
(CInode::count()-inode_reserve)/inode_reserve));
}
bool cache_toofull(void) const {
return cache_toofull_ratio() > 0.0;
}
uint64_t cache_size(void) const {
return mempool::get_pool(mempool::mds_co::id).allocated_bytes();
}
bool cache_overfull(void) const {
return (cache_inode_limit > 0 && CInode::count() > cache_inode_limit*cache_health_threshold) ||
(cache_size() > cache_memory_limit*cache_health_threshold);
}
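// Worked example (illustrative numbers, not from the source): with
// cache_memory_limit = 1 GiB and cache_reservation = 0.05, the memory
// reserve is 0.95 GiB, so a cache_size() of 1.045 GiB gives
// cache_toofull_ratio() = (1.045 - 0.95) / 0.95 ~= 0.10 and
// cache_toofull() returns true; cache_overfull() instead compares
// against cache_memory_limit * cache_health_threshold directly.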
void advance_stray() {
stray_index = (stray_index+1)%NUM_STRAY;
}
void activate_stray_manager();
/**
* Call this when you know that a CDentry is ready to be passed
* on to StrayManager (i.e. this is a stray you've just created)
*/
void notify_stray(CDentry *dn) {
ceph_assert(dn->get_dir()->get_inode()->is_stray());
stray_manager.eval_stray(dn);
}
void maybe_eval_stray(CInode *in, bool delay=false);
void clear_dirty_bits_for_stray(CInode* diri);
bool is_readonly() { return readonly; }
void force_readonly();
DecayRate decayrate;
int num_shadow_inodes;
int num_inodes_with_caps;
unsigned max_dir_commit_size;
static file_layout_t gen_default_file_layout(const MDSMap &mdsmap);
static file_layout_t gen_default_log_layout(const MDSMap &mdsmap);
file_layout_t default_file_layout;
file_layout_t default_log_layout;
void register_perfcounters();
// -- client leases --
public:
static const int client_lease_pools = 3;
float client_lease_durations[client_lease_pools];
protected:
xlist<ClientLease*> client_leases[client_lease_pools];
public:
void touch_client_lease(ClientLease *r, int pool, utime_t ttl) {
client_leases[pool].push_back(&r->item_lease);
r->ttl = ttl;
}
void notify_stray_removed()
{
stray_manager.notify_stray_removed();
}
void notify_stray_created()
{
stray_manager.notify_stray_created();
}
void eval_remote(CDentry *dn)
{
stray_manager.eval_remote(dn);
}
// -- client caps --
uint64_t last_cap_id;
// -- discover --
struct discover_info_t {
ceph_tid_t tid;
mds_rank_t mds;
inodeno_t ino;
frag_t frag;
snapid_t snap;
filepath want_path;
CInode *basei;
bool want_base_dir;
bool want_xlocked;
discover_info_t() :
tid(0), mds(-1), snap(CEPH_NOSNAP), basei(NULL),
want_base_dir(false), want_xlocked(false) {}
~discover_info_t() {
if (basei)
basei->put(MDSCacheObject::PIN_DISCOVERBASE);
}
void pin_base(CInode *b) {
basei = b;
basei->get(MDSCacheObject::PIN_DISCOVERBASE);
}
};
map<ceph_tid_t, discover_info_t> discovers;
ceph_tid_t discover_last_tid;
void _send_discover(discover_info_t& dis);
discover_info_t& _create_discover(mds_rank_t mds) {
ceph_tid_t t = ++discover_last_tid;
discover_info_t& d = discovers[t];
d.tid = t;
d.mds = mds;
return d;
}
// waiters
map<int, map<inodeno_t, MDSInternalContextBase::vec > > waiting_for_base_ino;
void discover_base_ino(inodeno_t want_ino, MDSInternalContextBase *onfinish, mds_rank_t from=MDS_RANK_NONE);
void discover_dir_frag(CInode *base, frag_t approx_fg, MDSInternalContextBase *onfinish,
mds_rank_t from=MDS_RANK_NONE);
void discover_path(CInode *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
bool want_xlocked=false, mds_rank_t from=MDS_RANK_NONE);
void discover_path(CDir *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
bool want_xlocked=false);
void kick_discovers(mds_rank_t who); // after a failure.
// -- subtrees --
protected:
/* subtree keys and each tree's non-recursive nested subtrees (the "bounds") */
map<CDir*,set<CDir*> > subtrees;
map<CInode*,list<pair<CDir*,CDir*> > > projected_subtree_renames; // renamed ino -> target dir
// adjust subtree auth specification
// dir->dir_auth
// imports/exports/nested_exports
// join/split subtrees as appropriate
public:
bool is_subtrees() { return !subtrees.empty(); }
void list_subtrees(list<CDir*>& ls);
void adjust_subtree_auth(CDir *root, mds_authority_t auth, bool adjust_pop=true);
void adjust_subtree_auth(CDir *root, mds_rank_t a, mds_rank_t b=CDIR_AUTH_UNKNOWN) {
adjust_subtree_auth(root, mds_authority_t(a,b));
}
void adjust_bounded_subtree_auth(CDir *dir, const set<CDir*>& bounds, mds_authority_t auth);
void adjust_bounded_subtree_auth(CDir *dir, const set<CDir*>& bounds, mds_rank_t a) {
adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
}
void adjust_bounded_subtree_auth(CDir *dir, const vector<dirfrag_t>& bounds, const mds_authority_t &auth);
void adjust_bounded_subtree_auth(CDir *dir, const vector<dirfrag_t>& bounds, mds_rank_t a) {
adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
}
void map_dirfrag_set(const list<dirfrag_t>& dfs, set<CDir*>& result);
void try_subtree_merge(CDir *root);
void try_subtree_merge_at(CDir *root, set<CInode*> *to_eval, bool adjust_pop=true);
void subtree_merge_writebehind_finish(CInode *in, MutationRef& mut);
void eval_subtree_root(CInode *diri);
CDir *get_subtree_root(CDir *dir);
CDir *get_projected_subtree_root(CDir *dir);
bool is_leaf_subtree(CDir *dir) {
ceph_assert(subtrees.count(dir));
return subtrees[dir].empty();
}
void remove_subtree(CDir *dir);
bool is_subtree(CDir *root) {
return subtrees.count(root);
}
void get_subtree_bounds(CDir *root, set<CDir*>& bounds);
void get_wouldbe_subtree_bounds(CDir *root, set<CDir*>& bounds);
void verify_subtree_bounds(CDir *root, const set<CDir*>& bounds);
void verify_subtree_bounds(CDir *root, const list<dirfrag_t>& bounds);
void project_subtree_rename(CInode *diri, CDir *olddir, CDir *newdir);
void adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop);
void get_auth_subtrees(set<CDir*>& s);
void get_fullauth_subtrees(set<CDir*>& s);
int num_subtrees();
int num_subtrees_fullauth();
int num_subtrees_fullnonauth();
protected:
// -- requests --
ceph::unordered_map<metareqid_t, MDRequestRef> active_requests;
public:
int get_num_client_requests();
MDRequestRef request_start(const MClientRequest::const_ref& req);
MDRequestRef request_start_slave(metareqid_t rid, __u32 attempt, const Message::const_ref &m);
MDRequestRef request_start_internal(int op);
bool have_request(metareqid_t rid) {
return active_requests.count(rid);
}
MDRequestRef request_get(metareqid_t rid);
void request_pin_ref(MDRequestRef& r, CInode *ref, vector<CDentry*>& trace);
void request_finish(MDRequestRef& mdr);
void request_forward(MDRequestRef& mdr, mds_rank_t mds, int port=0);
void dispatch_request(MDRequestRef& mdr);
void request_drop_foreign_locks(MDRequestRef& mdr);
void request_drop_non_rdlocks(MDRequestRef& r);
void request_drop_locks(MDRequestRef& r);
void request_cleanup(MDRequestRef& r);
void request_kill(MDRequestRef& r); // called when session closes
// journal/snap helpers
CInode *pick_inode_snap(CInode *in, snapid_t follows);
CInode *cow_inode(CInode *in, snapid_t last);
void journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, CDentry *dn,
snapid_t follows=CEPH_NOSNAP,
CInode **pcow_inode=0, CDentry::linkage_t *dnl=0);
void journal_cow_inode(MutationRef& mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP,
CInode **pcow_inode=0);
void journal_dirty_inode(MutationImpl *mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP);
void project_rstat_inode_to_frag(CInode *cur, CDir *parent, snapid_t first,
int linkunlink, SnapRealm *prealm);
void _project_rstat_inode_to_frag(CInode::mempool_inode & inode, snapid_t ofirst, snapid_t last,
CDir *parent, int linkunlink, bool update_inode);
void project_rstat_frag_to_inode(nest_info_t& rstat, nest_info_t& accounted_rstat,
snapid_t ofirst, snapid_t last,
CInode *pin, bool cow_head);
void broadcast_quota_to_client(CInode *in, client_t exclude_ct = -1);
void predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
CInode *in, CDir *parent,
int flags, int linkunlink=0,
snapid_t follows=CEPH_NOSNAP);
// slaves
void add_uncommitted_master(metareqid_t reqid, LogSegment *ls, set<mds_rank_t> &slaves, bool safe=false) {
uncommitted_masters[reqid].ls = ls;
uncommitted_masters[reqid].slaves = slaves;
uncommitted_masters[reqid].safe = safe;
}
void wait_for_uncommitted_master(metareqid_t reqid, MDSInternalContextBase *c) {
uncommitted_masters[reqid].waiters.push_back(c);
}
bool have_uncommitted_master(metareqid_t reqid, mds_rank_t from) {
auto p = uncommitted_masters.find(reqid);
return p != uncommitted_masters.end() && p->second.slaves.count(from) > 0;
}
void log_master_commit(metareqid_t reqid);
void logged_master_update(metareqid_t reqid);
void _logged_master_commit(metareqid_t reqid);
void committed_master_slave(metareqid_t r, mds_rank_t from);
void finish_committed_masters();
void _logged_slave_commit(mds_rank_t from, metareqid_t reqid);
// -- recovery --
protected:
set<mds_rank_t> recovery_set;
public:
void set_recovery_set(set<mds_rank_t>& s);
void handle_mds_failure(mds_rank_t who);
void handle_mds_recovery(mds_rank_t who);
protected:
// [resolve]
// from EImportStart w/o EImportFinish during journal replay
map<dirfrag_t, vector<dirfrag_t> > my_ambiguous_imports;
// from MMDSResolves
map<mds_rank_t, map<dirfrag_t, vector<dirfrag_t> > > other_ambiguous_imports;
map<mds_rank_t, map<metareqid_t, MDSlaveUpdate*> > uncommitted_slave_updates; // slave: for replay.
map<CInode*, int> uncommitted_slave_rename_olddir; // slave: preserve the non-auth dir until seeing commit.
map<CInode*, int> uncommitted_slave_unlink; // slave: preserve the unlinked inode until seeing commit.
// track master requests whose slaves haven't acknowledged commit
struct umaster {
set<mds_rank_t> slaves;
LogSegment *ls;
MDSInternalContextBase::vec waiters;
bool safe;
bool committing;
bool recovering;
umaster() : ls(NULL), safe(false), committing(false), recovering(false) {}
};
map<metareqid_t, umaster> uncommitted_masters; // master: req -> slave set
set<metareqid_t> pending_masters;
map<int, set<metareqid_t> > ambiguous_slave_updates;
friend class ESlaveUpdate;
friend class ECommitted;
bool resolves_pending;
set<mds_rank_t> resolve_gather; // nodes i need resolves from
set<mds_rank_t> resolve_ack_gather; // nodes i need a resolve_ack from
set<version_t> resolve_snapclient_commits;
map<metareqid_t, mds_rank_t> resolve_need_rollback; // rollbacks i'm writing to the journal
map<mds_rank_t, MMDSResolve::const_ref> delayed_resolve;
void handle_resolve(const MMDSResolve::const_ref &m);
void handle_resolve_ack(const MMDSResolveAck::const_ref &m);
void process_delayed_resolve();
void discard_delayed_resolve(mds_rank_t who);
void maybe_resolve_finish();
void disambiguate_my_imports();
void disambiguate_other_imports();
void trim_unlinked_inodes();
void add_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master, MDSlaveUpdate*);
void finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
MDSlaveUpdate* get_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
void send_slave_resolves();
void send_subtree_resolves();
void maybe_finish_slave_resolve();
public:
void recalc_auth_bits(bool replay);
void remove_inode_recursive(CInode *in);
bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
auto p = ambiguous_slave_updates.find(master);
return p != ambiguous_slave_updates.end() && p->second.count(reqid);
}
void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
ambiguous_slave_updates[master].insert(reqid);
}
void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
auto p = ambiguous_slave_updates.find(master);
auto q = p->second.find(reqid);
ceph_assert(q != p->second.end());
p->second.erase(q);
if (p->second.empty())
ambiguous_slave_updates.erase(p);
}
void add_rollback(metareqid_t reqid, mds_rank_t master) {
resolve_need_rollback[reqid] = master;
}
void finish_rollback(metareqid_t reqid);
// ambiguous imports
void add_ambiguous_import(dirfrag_t base, const vector<dirfrag_t>& bounds);
void add_ambiguous_import(CDir *base, const set<CDir*>& bounds);
bool have_ambiguous_import(dirfrag_t base) {
return my_ambiguous_imports.count(base);
}
void get_ambiguous_import_bounds(dirfrag_t base, vector<dirfrag_t>& bounds) {
ceph_assert(my_ambiguous_imports.count(base));
bounds = my_ambiguous_imports[base];
}
void cancel_ambiguous_import(CDir *);
void finish_ambiguous_import(dirfrag_t dirino);
void resolve_start(MDSInternalContext *resolve_done_);
void send_resolves();
void maybe_send_pending_resolves() {
if (resolves_pending)
send_subtree_resolves();
}
void _move_subtree_map_bound(dirfrag_t df, dirfrag_t oldparent, dirfrag_t newparent,
map<dirfrag_t,vector<dirfrag_t> >& subtrees);
ESubtreeMap *create_subtree_map();
void clean_open_file_lists();
void dump_openfiles(Formatter *f);
bool dump_inode(Formatter *f, uint64_t number);
protected:
// [rejoin]
bool rejoins_pending;
set<mds_rank_t> rejoin_gather; // nodes from whom i need a rejoin
set<mds_rank_t> rejoin_sent; // nodes i sent a rejoin to
set<mds_rank_t> rejoin_ack_sent; // nodes i sent a rejoin ack to
set<mds_rank_t> rejoin_ack_gather; // nodes from whom i need a rejoin ack
map<mds_rank_t,map<inodeno_t,map<client_t,Capability::Import> > > rejoin_imported_caps;
map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > > rejoin_slave_exports;
map<client_t,entity_inst_t> rejoin_client_map;
map<client_t,client_metadata_t> rejoin_client_metadata_map;
map<client_t,pair<Session*,uint64_t> > rejoin_session_map;
map<inodeno_t,pair<mds_rank_t,map<client_t,cap_reconnect_t> > > cap_exports; // ino -> target, client -> capex
map<inodeno_t,map<client_t,map<mds_rank_t,cap_reconnect_t> > > cap_imports; // ino -> client -> frommds -> capex
set<inodeno_t> cap_imports_missing;
map<inodeno_t, MDSInternalContextBase::vec > cap_reconnect_waiters;
int cap_imports_num_opening;
set<CInode*> rejoin_undef_inodes;
set<CInode*> rejoin_potential_updated_scatterlocks;
set<CDir*> rejoin_undef_dirfrags;
map<mds_rank_t, set<CInode*> > rejoin_unlinked_inodes;
vector<CInode*> rejoin_recover_q, rejoin_check_q;
list<SimpleLock*> rejoin_eval_locks;
MDSInternalContextBase::vec rejoin_waiters;
void rejoin_walk(CDir *dir, const MMDSCacheRejoin::ref &rejoin);
void handle_cache_rejoin(const MMDSCacheRejoin::const_ref &m);
void handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &m);
CInode* rejoin_invent_inode(inodeno_t ino, snapid_t last);
CDir* rejoin_invent_dirfrag(dirfrag_t df);
void handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &m);
void rejoin_scour_survivor_replicas(mds_rank_t from, const MMDSCacheRejoin::const_ref &ack,
set<vinodeno_t>& acked_inodes,
set<SimpleLock *>& gather_locks);
void handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &m);
void rejoin_send_acks();
void rejoin_trim_undef_inodes();
void maybe_send_pending_rejoins() {
if (rejoins_pending)
rejoin_send_rejoins();
}
std::unique_ptr<MDSInternalContext> rejoin_done;
std::unique_ptr<MDSInternalContext> resolve_done;
public:
void rejoin_start(MDSInternalContext *rejoin_done_);
void rejoin_gather_finish();
void rejoin_send_rejoins();
void rejoin_export_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
int target=-1, bool drop_path=false) {
auto& ex = cap_exports[ino];
ex.first = target;
auto &_icr = ex.second[client] = icr;
if (drop_path)
_icr.path.clear();
}
void rejoin_recovered_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
mds_rank_t frommds=MDS_RANK_NONE, bool drop_path=false) {
auto &_icr = cap_imports[ino][client][frommds] = icr;
if (drop_path)
_icr.path.clear();
}
void rejoin_recovered_client(client_t client, const entity_inst_t& inst) {
rejoin_client_map.emplace(client, inst);
}
bool rejoin_has_cap_reconnect(inodeno_t ino) const {
return cap_imports.count(ino);
}
void add_replay_ino_alloc(inodeno_t ino) {
cap_imports_missing.insert(ino); // avoid opening ino during cache rejoin
}
const cap_reconnect_t *get_replay_cap_reconnect(inodeno_t ino, client_t client) {
if (cap_imports.count(ino) &&
cap_imports[ino].count(client) &&
cap_imports[ino][client].count(MDS_RANK_NONE)) {
return &cap_imports[ino][client][MDS_RANK_NONE];
}
return NULL;
}
void remove_replay_cap_reconnect(inodeno_t ino, client_t client) {
ceph_assert(cap_imports[ino].size() == 1);
ceph_assert(cap_imports[ino][client].size() == 1);
cap_imports.erase(ino);
}
void wait_replay_cap_reconnect(inodeno_t ino, MDSInternalContextBase *c) {
cap_reconnect_waiters[ino].push_back(c);
}
// [reconnect/rejoin caps]
struct reconnected_cap_info_t {
inodeno_t realm_ino;
snapid_t snap_follows;
int dirty_caps;
bool snapflush;
reconnected_cap_info_t() :
realm_ino(0), snap_follows(0), dirty_caps(0), snapflush(false) {}
};
map<inodeno_t,map<client_t, reconnected_cap_info_t> > reconnected_caps; // inode -> client -> snap_follows,realmino
map<inodeno_t,map<client_t, snapid_t> > reconnected_snaprealms; // realmino -> client -> realmseq
void add_reconnected_cap(client_t client, inodeno_t ino, const cap_reconnect_t& icr) {
reconnected_cap_info_t &info = reconnected_caps[ino][client];
info.realm_ino = inodeno_t(icr.capinfo.snaprealm);
info.snap_follows = icr.snap_follows;
}
void set_reconnected_dirty_caps(client_t client, inodeno_t ino, int dirty, bool snapflush) {
reconnected_cap_info_t &info = reconnected_caps[ino][client];
info.dirty_caps |= dirty;
if (snapflush)
info.snapflush = snapflush;
}
void add_reconnected_snaprealm(client_t client, inodeno_t ino, snapid_t seq) {
reconnected_snaprealms[ino][client] = seq;
}
friend class C_MDC_RejoinOpenInoFinish;
friend class C_MDC_RejoinSessionsOpened;
void rejoin_open_ino_finish(inodeno_t ino, int ret);
void rejoin_prefetch_ino_finish(inodeno_t ino, int ret);
void rejoin_open_sessions_finish(map<client_t,pair<Session*,uint64_t> >& session_map);
bool process_imported_caps();
void choose_lock_states_and_reconnect_caps();
void prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t ino,
map<client_t,MClientSnap::ref>& splits);
void prepare_realm_merge(SnapRealm *realm, SnapRealm *parent_realm, map<client_t,MClientSnap::ref>& splits);
void send_snaps(map<client_t,MClientSnap::ref>& splits);
Capability* rejoin_import_cap(CInode *in, client_t client, const cap_reconnect_t& icr, mds_rank_t frommds);
void finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snapid_t seq,
map<client_t,MClientSnap::ref>& updates);
void try_reconnect_cap(CInode *in, Session *session);
void export_remaining_imported_caps();
// realm inodes
set<CInode*> rejoin_pending_snaprealms;
// cap imports. delayed snap parent opens.
map<client_t,set<CInode*> > delayed_imported_caps;
void do_cap_import(Session *session, CInode *in, Capability *cap,
uint64_t p_cap_id, ceph_seq_t p_seq, ceph_seq_t p_mseq,
int peer, int p_flags);
void do_delayed_cap_imports();
void rebuild_need_snapflush(CInode *head_in, SnapRealm *realm, client_t client,
snapid_t snap_follows);
void open_snaprealms();
bool open_undef_inodes_dirfrags();
void opened_undef_inode(CInode *in);
void opened_undef_dirfrag(CDir *dir) {
rejoin_undef_dirfrags.erase(dir);
}
void reissue_all_caps();
friend class Locker;
friend class Migrator;
friend class MDBalancer;
// StrayManager needs to be able to remove_inode() from us
// when it is done purging
friend class StrayManager;
// File size recovery
private:
RecoveryQueue recovery_queue;
void identify_files_to_recover();
public:
void start_files_to_recover();
void do_file_recover();
void queue_file_recover(CInode *in);
void _queued_file_recover_cow(CInode *in, MutationRef& mut);
// subsystems
std::unique_ptr<Migrator> migrator;
public:
explicit MDCache(MDSRank *m, PurgeQueue &purge_queue_);
~MDCache();
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed,
const MDSMap &mds_map);
// debug
void log_stat();
// root inode
CInode *get_root() { return root; }
CInode *get_myin() { return myin; }
size_t get_cache_size() { return lru.lru_get_size(); }
// trimming
bool trim(uint64_t count=0);
private:
void trim_lru(uint64_t count, expiremap& expiremap);
bool trim_dentry(CDentry *dn, expiremap& expiremap);
void trim_dirfrag(CDir *dir, CDir *con, expiremap& expiremap);
bool trim_inode(CDentry *dn, CInode *in, CDir *con, expiremap&);
void send_expire_messages(expiremap& expiremap);
void trim_non_auth(); // trim out trimmable non-auth items
public:
bool trim_non_auth_subtree(CDir *directory);
void standby_trim_segment(LogSegment *ls);
void try_trim_non_auth_subtree(CDir *dir);
bool can_trim_non_auth_dirfrag(CDir *dir) {
return my_ambiguous_imports.count(dir->dirfrag()) == 0 &&
uncommitted_slave_rename_olddir.count(dir->inode) == 0;
}
/**
* For all unreferenced inodes, dirs, dentries below an inode, compose
* expiry messages. This is used when giving up all replicas of entities
* for an MDS peer in the 'stopping' state, such that the peer can
* empty its cache and finish shutting down.
*
* We have to make sure we're only expiring un-referenced items to
* avoid interfering with ongoing stray-movement (we can't distinguish
* between the "moving my strays" and "waiting for my cache to empty"
* phases within 'stopping')
*
* @return false if we completed cleanly, true if caller should stop
* expiring because we hit something with refs.
*/
bool expire_recursive(CInode *in, expiremap& expiremap);
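// Hypothetical caller sketch, following the contract documented above
// (false = expired cleanly, true = stop, something still has refs):
//
//   expiremap em;
//   if (!expire_recursive(base_in, em))   // base_in: assumed base inode
//     send_expire_messages(em);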
void trim_client_leases();
void check_memory_usage();
time last_recall_state;
// shutdown
private:
set<inodeno_t> shutdown_exporting_strays;
pair<dirfrag_t, string> shutdown_export_next;
public:
void shutdown_start();
void shutdown_check();
bool shutdown_pass();
bool shutdown(); // clear cache (ie at shutdown)
bool shutdown_export_strays();
void shutdown_export_stray_finish(inodeno_t ino) {
if (shutdown_exporting_strays.erase(ino))
shutdown_export_strays();
}
bool did_shutdown_log_cap;
// inode_map
bool have_inode(vinodeno_t vino) {
if (vino.snapid == CEPH_NOSNAP)
return inode_map.count(vino.ino);
else
return snap_inode_map.count(vino);
}
bool have_inode(inodeno_t ino, snapid_t snap=CEPH_NOSNAP) {
return have_inode(vinodeno_t(ino, snap));
}
CInode* get_inode(vinodeno_t vino) {
if (vino.snapid == CEPH_NOSNAP) {
auto p = inode_map.find(vino.ino);
if (p != inode_map.end())
return p->second;
} else {
auto p = snap_inode_map.find(vino);
if (p != snap_inode_map.end())
return p->second;
}
return NULL;
}
CInode* get_inode(inodeno_t ino, snapid_t s=CEPH_NOSNAP) {
return get_inode(vinodeno_t(ino, s));
}
CInode* lookup_snap_inode(vinodeno_t vino) {
auto p = snap_inode_map.lower_bound(vino);
if (p != snap_inode_map.end() &&
p->second->ino() == vino.ino && p->second->first <= vino.snapid)
return p->second;
return NULL;
}
CDir* get_dirfrag(dirfrag_t df) {
CInode *in = get_inode(df.ino);
if (!in)
return NULL;
return in->get_dirfrag(df.frag);
}
CDir* get_dirfrag(inodeno_t ino, std::string_view dn) {
CInode *in = get_inode(ino);
if (!in)
return NULL;
frag_t fg = in->pick_dirfrag(dn);
return in->get_dirfrag(fg);
}
CDir* get_force_dirfrag(dirfrag_t df, bool replay) {
CInode *diri = get_inode(df.ino);
if (!diri)
return NULL;
CDir *dir = force_dir_fragment(diri, df.frag, replay);
if (!dir)
dir = diri->get_dirfrag(df.frag);
return dir;
}
MDSCacheObject *get_object(const MDSCacheObjectInfo &info);
public:
void add_inode(CInode *in);
void remove_inode(CInode *in);
protected:
void touch_inode(CInode *in) {
if (in->get_parent_dn())
touch_dentry(in->get_projected_parent_dn());
}
public:
void touch_dentry(CDentry *dn) {
if (dn->state_test(CDentry::STATE_BOTTOMLRU)) {
bottom_lru.lru_midtouch(dn);
} else {
if (dn->is_auth())
lru.lru_touch(dn);
else
lru.lru_midtouch(dn);
}
}
void touch_dentry_bottom(CDentry *dn) {
if (dn->state_test(CDentry::STATE_BOTTOMLRU))
return;
lru.lru_bottouch(dn);
}
protected:
void inode_remove_replica(CInode *in, mds_rank_t rep, bool rejoin,
set<SimpleLock *>& gather_locks);
void dentry_remove_replica(CDentry *dn, mds_rank_t rep, set<SimpleLock *>& gather_locks);
void rename_file(CDentry *srcdn, CDentry *destdn);
public:
// truncate
void truncate_inode(CInode *in, LogSegment *ls);
void _truncate_inode(CInode *in, LogSegment *ls);
void truncate_inode_finish(CInode *in, LogSegment *ls);
void truncate_inode_logged(CInode *in, MutationRef& mut);
void add_recovered_truncate(CInode *in, LogSegment *ls);
void remove_recovered_truncate(CInode *in, LogSegment *ls);
void start_recovered_truncates();
public:
CDir *get_auth_container(CDir *in);
CDir *get_export_container(CDir *dir);
void find_nested_exports(CDir *dir, set<CDir*>& s);
void find_nested_exports_under(CDir *import, CDir *dir, set<CDir*>& s);
private:
bool opening_root, open;
MDSInternalContextBase::vec waiting_for_open;
public:
void init_layouts();
void create_unlinked_system_inode(CInode *in, inodeno_t ino,
int mode) const;
CInode *create_system_inode(inodeno_t ino, int mode);
CInode *create_root_inode();
void create_empty_hierarchy(MDSGather *gather);
void create_mydir_hierarchy(MDSGather *gather);
bool is_open() { return open; }
void wait_for_open(MDSInternalContextBase *c) {
waiting_for_open.push_back(c);
}
void open_root_inode(MDSInternalContextBase *c);
void open_root();
void open_mydir_inode(MDSInternalContextBase *c);
void open_mydir_frag(MDSInternalContextBase *c);
void populate_mydir();
void _create_system_file(CDir *dir, const char *name, CInode *in, MDSInternalContextBase *fin);
void _create_system_file_finish(MutationRef& mut, CDentry *dn,
version_t dpv, MDSInternalContextBase *fin);
void open_foreign_mdsdir(inodeno_t ino, MDSInternalContextBase *c);
CDir *get_stray_dir(CInode *in);
CDentry *get_or_create_stray_dentry(CInode *in);
/**
* Find the given dentry (and whether it exists or not), its ancestors,
* and get them all into memory and usable on this MDS. This function
* makes a best-effort attempt to load everything; if it needs to
* go away and do something then it will put the request on a waitlist.
* It prefers the mdr, then the req, then the fin; at least one of
* these must be non-null.
*
* @param mdr The MDRequest associated with the path. Can be null.
* @param cf A MDSContextFactory for waiter building.
* @param path The path to traverse to.
* @param pdnvec Data return parameter -- on success, contains a
* vector of dentries. On failure, is either empty or contains the
* full trace of traversable dentries.
* @param pin Data return parameter -- if successful, points to the inode
* associated with filepath. If unsuccessful, is null.
* @param onfail Specifies different lookup failure behaviors. If set to
* MDS_TRAVERSE_DISCOVERXLOCK, path_traverse will succeed on null
* dentries (instead of returning -ENOENT). If set to
* MDS_TRAVERSE_FORWARD, it will forward the request to the auth
* MDS if that becomes appropriate (ie, if it doesn't know the contents
* of a directory). If set to MDS_TRAVERSE_DISCOVER, it
* will attempt to look up the path from a different MDS (and bring them
* into its cache as replicas).
*
* @returns 0 on success, 1 on "not done yet", 2 on "forwarding", -errno otherwise.
* If it returns 1, the requester associated with this call has been placed
* on the appropriate waitlist, and it should unwind itself and back out.
* If it returns 2 the request has been forwarded, and again the requester
* should unwind itself and back out.
*/
int path_traverse(MDRequestRef& mdr, MDSContextFactory& cf, const filepath& path,
vector<CDentry*> *pdnvec, CInode **pin, int onfail);
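// Hypothetical dispatch-time usage, matching the return contract above:
//
//   vector<CDentry*> trace;
//   CInode *in = nullptr;
//   int r = path_traverse(mdr, cf, req->get_filepath(),
//                         &trace, &in, MDS_TRAVERSE_FORWARD);
//   if (r > 0)
//     return;      // 1: queued on a waitlist, 2: forwarded; unwind
//   if (r < 0)
//     { /* reply with the -errno */ }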
CInode *cache_traverse(const filepath& path);
void open_remote_dirfrag(CInode *diri, frag_t fg, MDSInternalContextBase *fin);
CInode *get_dentry_inode(CDentry *dn, MDRequestRef& mdr, bool projected=false);
bool parallel_fetch(map<inodeno_t,filepath>& pathmap, set<inodeno_t>& missing);
bool parallel_fetch_traverse_dir(inodeno_t ino, filepath& path,
set<CDir*>& fetch_queue, set<inodeno_t>& missing,
C_GatherBuilder &gather_bld);
void open_remote_dentry(CDentry *dn, bool projected, MDSInternalContextBase *fin,
bool want_xlocked=false);
void _open_remote_dentry_finish(CDentry *dn, inodeno_t ino, MDSInternalContextBase *fin,
bool want_xlocked, int r);
void make_trace(vector<CDentry*>& trace, CInode *in);
protected:
struct open_ino_info_t {
vector<inode_backpointer_t> ancestors;
set<mds_rank_t> checked;
mds_rank_t checking;
mds_rank_t auth_hint;
bool check_peers;
bool fetch_backtrace;
bool discover;