#
# Global settings
#
# log level: INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip=127.0.0.1
global.port=8200
global.subnet=127.0.0.0/24
global.enable_external_server=false
global.external_ip=127.0.0.1
global.external_subnet=127.0.0.0/24
# chunk size, typically 16MB
global.chunk_size=16777216
# chunk meta page size, typically 4KB
global.meta_page_size=4096
# chunk's block size; IO requests must be aligned to it, supported values are 512 and 4096
# it must be consistent with the `block_size` recorded in the file at `chunkfilepool.meta_path` and with `mds.volume.blockSize` in the MDS configuration
# for clone chunks and snapshot chunks, it is also the minimum granularity that each bitmap bit represents
# if set to 512, each chunk needs a 4096-byte bitmap, so meta_page_size should be 8192 or larger
global.block_size=4096
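# worked example of the bitmap math above, assuming the default 16MB chunk:
#   block_size=4096: 16777216 / 4096 = 4096 bits  ->  512-byte bitmap, fits in a 4KB meta page
#   block_size=512:  16777216 / 512  = 32768 bits -> 4096-byte bitmap, needs meta_page_size >= 8192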
# maximum location length allowed for a clone chunk
global.location_limit=3000
#
# MDS settings
#
# multiple MDS addresses are supported, separated by commas, e.g. 127.0.0.1:6666,127.0.0.1:7777
mds.listen.addr=127.0.0.1:6666
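# a hypothetical three-node MDS cluster would be configured like this
# (addresses are illustrative only):
# mds.listen.addr=127.0.0.1:6666,127.0.0.1:7777,127.0.0.1:8888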
# maximum number of retries when registering with MDS
mds.register_retries=100
# RPC timeout for registering with MDS, typically 1000ms
mds.register_timeout=1000
# interval between heartbeats to MDS, typically 10s
mds.heartbeat_interval=10
# RPC timeout for heartbeats to MDS, typically 1000ms
mds.heartbeat_timeout=5000
#
# Chunkserver settings
#
# chunkserver root directory
chunkserver.stor_uri=local://./0/
# chunkserver metadata file
chunkserver.meta_uri=local://./0/chunkserver.dat
# disk type
chunkserver.disk_type=nvme
# bandwidth cap for raft install snapshot, typically 20MB
chunkserver.snapshot_throttle_throughput_bytes=20971520
# check cycles enable finer-grained bandwidth control: with snapshotThroughputBytes=100MB
# and check cycles=10, the bandwidth in each 1/10 second is capped at 10MB and unused
# budget does not accumulate; e.g. if the first 1/10 second uses 10MB, that budget
# expires, and the second 1/10 second still gets only 10MB, not 20MB
chunkserver.snapshot_throttle_check_cycles=4
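# with the values configured above: 20971520 bytes / 4 cycles = 5242880 bytes,
# i.e. at most 5MB of install-snapshot traffic per 1/4-second window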
# cap on the number of in-flight IO requests, typically 5000
chunkserver.max_inflight_requests=5000
#
# Testing purpose settings
#
test.create_testcopyset=false
test.testcopyset_poolid=666
test.testcopyset_copysetid=888888
test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0
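# note: each peer in test.testcopyset_conf is written in braft's ip:port:index
# peer-id format; the trailing 0 is the peer index on that address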
#
# Copyset settings
#
# whether to verify the raft term, normally enabled
copyset.check_term=true
# whether to disable the raft configuration-change service, normally not disabled
copyset.disable_cli=false
copyset.log_applied_task=false
# raft election timeout, typically 5000ms
copyset.election_timeout_ms=1000
# raft snapshot interval, typically 1800s, i.e. 30 minutes
copyset.snapshot_interval_s=1800
# a newly added node first copies data in a learner-like role; once it is within
# catchup_margin log entries of the leader, the leader tries to commit the
# configuration-change entry (a submitted entry will normally be committed and
# applied); a small catchup_margin makes it very likely that the learner can
# join the replication group shortly afterwards
copyset.catchup_margin=1000
# copyset chunk data directory
copyset.chunk_data_uri=local://./0/copysets
# raft WAL log directory
copyset.raft_log_uri=curve://./0/copysets
# raft metadata directory
copyset.raft_meta_uri=local://./0/copysets
# raft snapshot directory
copyset.raft_snapshot_uri=curve://./0/copysets
# copyset recycle directory
copyset.recycler_uri=local://./0/recycler
# concurrency limit for loading copysets at chunkserver startup; 0 means no limit
copyset.load_concurrency=10
# number of threads the chunkserver uses to sync copysets
copyset.sync_concurrency=20
# maximum retries when checking whether a copyset has finished loading hits an error
copyset.check_retrytimes=3
# a copyset is considered loaded once the gap between this peer's applied_index
# and the leader's committed_index is smaller than this value
copyset.finishload_margin=2000
# sleep interval of the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms=1000
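# putting the loading knobs above together (using the values in this file): up
# to 10 copysets load in parallel, and every 1000ms each one is re-checked until
# its applied_index is within 2000 entries of the leader's committed_index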
# interval between copyset scans
copyset.scan_interval_sec=5
# size of each scan, 4MB
copyset.scan_size_byte=4194304
# RPC timeout for the follower sending its scanmap to the leader
copyset.scan_rpc_timeout_ms=1000
# RPC retry count for the follower sending its scanmap to the leader
copyset.scan_rpc_retry_times=3
# RPC retry interval for the follower sending its scanmap to the leader
copyset.scan_rpc_retry_interval_us=100000
# enable O_DSYNC when opening chunk files
copyset.enable_odsync_when_open_chunkfile=true
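# note: with O_DSYNC every write returns only after the data has reached the
# device, trading write latency for stronger crash safety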
# interval in seconds that triggers a sync
copyset.sync_trigger_seconds=25
# per-sync chunk limit, default 2MB
copyset.sync_chunk_limits=2097152
# if the accumulated writes within 30s exceed sync_threshold, sync_chunk_limits is doubled
copyset.sync_threshold=65536
# check syncing interval
copyset.check_syncing_interval_ms=500
#
# Clone settings
#
# whether to disable the curve client
clone.disable_curve_client=false
# whether to disable the s3 adapter
clone.disable_s3_adapter=false
# clone slice size, typically 1MB
clone.slice_size=1048576
# whether reads of a clone chunk paste the data to the local chunk
# this option has no effect on recover-chunk requests
clone.enable_paste=false
# number of clone threads
clone.thread_num=10
# clone queue depth
clone.queue_depth=6000
# curve root username
curve.root_username=root
# curve root password
curve.root_password=root_password
# client configuration file
curve.config_path=conf/cs_client.conf
# s3 configuration file
s3.config_path=conf/s3.conf
s3.config_path=conf/s3.conf
# curve file time to live (seconds)
curve.curve_file_timeout_s=30
#
# Local FileSystem settings
#
# whether to use renameat2, supported on ext4 since kernel 3.15
fs.enable_renameat2=true
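# note: renameat2() (added in Linux 3.15) accepts flags such as RENAME_EXCHANGE
# for atomically swapping two paths, which plain rename() cannot do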
#
# metrics settings
# true means on, false means off
#
metric.onoff=true
#
# Storage engine settings
#
storeng.sync_write=false
#
# QoS settings
#
#
# Concurrent apply module
#
# write-thread concurrency of the concurrent apply module, typically 10
wconcurrentapply.size=10
# queue depth of the write threads in the concurrent apply module
wconcurrentapply.queuedepth=1
# read-thread concurrency of the concurrent apply module, typically 5
rconcurrentapply.size=5
# queue depth of the read threads in the concurrent apply module
rconcurrentapply.queuedepth=1
#
# Chunkfile pool
#
# whether to get chunks from the chunkfilepool, normally true
chunkfilepool.enable_get_chunk_from_pool=true
# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/chunks
# chunkfilepool meta file path
#chunkfilepool.meta_path=./chunkfilepool.meta
# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size=4096
# maximum retries when getting a chunk from the chunkfilepool
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
# The bytes per write for cleaning chunk (max: 1MB)
chunkfilepool.clean.bytes_per_write=4096
# The throttle iops for cleaning chunk (4KB/IO)
chunkfilepool.clean.throttle_iops=500
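# effective cleaning bandwidth implied by the two values above:
# 500 IOPS * 4KB per IO = 2048000 bytes/s, i.e. about 2MB/s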
# whether to allocate the file pool as a percentage of the disk size
chunkfilepool.allocated_by_percent=true
# percentage of the total disk to preallocate
chunkfilepool.allocate_percent=80
# preallocated storage size of the chunkfilepool (None/KB/MB/GB/TB)
chunkfilepool.chunk_file_pool_size=1GB
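# illustrative sizing: with allocated_by_percent=true on a 1TB data disk, about
# 800GB (allocate_percent=80) is preallocated as chunk files; the fixed
# chunk_file_pool_size is presumably only honored when allocated_by_percent=false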
# number of threads used to format chunks
chunkfilepool.thread_num=1
#
# WAL file pool
#
# whether the walpool shares the chunkfilepool; if true, the settings from the third entry onward in this section are ignored
walfilepool.use_chunk_file_pool=true
# takes effect when the walpool and chunkfilepool are shared: space for the walpool is reserved during capacity allocation
walfilepool.use_chunk_file_pool_reserve=15
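# illustrative reading of the reserve value above: when the pools are shared,
# about 15% of the shared pool's capacity is set aside for WAL segments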
# whether to get segments from the walfilepool, normally true
walfilepool.enable_get_segment_from_pool=true
# walpool directory
walfilepool.file_pool_dir=./0/
# walpool meta file path
walfilepool.meta_path=./walfilepool.meta
# walpool segment size
walfilepool.segment_size=8388608
# WAL metapage size
walfilepool.metapage_size=4096
# WAL filepool metadata file size
walfilepool.meta_file_size=4096
# maximum retries when getting a segment from the WAL filepool
walfilepool.retry_times=5
# whether to allocate the file pool as a percentage of the disk size
walfilepool.allocated_by_percent=true
# percentage of the total disk to preallocate
walfilepool.allocate_percent=10
# preallocated storage size of the walfilepool (None/KB/MB/GB/TB)
walfilepool.wal_file_pool_size=0
# number of threads used to format chunks
walfilepool.thread_num=1
#
# trash settings
#
# time after which recycled chunkserver data is permanently deleted
trash.expire_afterSec=300
# interval at which the chunkserver checks recycled data for expiry
trash.scan_periodSec=120
#
# Common settings
#
# directory for chunkserver logs
chunkserver.common.logDir=./
# for unit tests:
# chunkserver.common.logDir=./runlog/