Skip to content

Commit

Permalink
Makefile: added makefile for make build and image.
Browse files Browse the repository at this point in the history
  • Loading branch information
Wine93 authored and YunhuiChen committed Jan 19, 2022
1 parent beac479 commit d7c6aed
Show file tree
Hide file tree
Showing 21 changed files with 1,050 additions and 29 deletions.
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,12 @@ runlog/
test/client/configs/*
!test/client/configs/.gitkeep

projects/*
docker/curvebs
docker/base/*
!docker/base/Dockerfile
!docker/base/Makefile

# curvefs
curvefs/devops/projects/
curvefs/devops/ssh/
Expand Down
21 changes: 21 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Copyright (C) 2021 Jingli Chen (Wine93), NetEase Inc.

# Top-level build driver: every target below is a command (not a file) and
# simply delegates to a helper script under util/, hence the .PHONY list.
.PHONY: list build install image

# User-tunable knobs; override on the command line, e.g.:
#   make build only=chunkserver release=1
# NOTE(review): the double quotes are stored as part of each value and are
# later interpreted by bash when the recipe runs — keep them when overriding
# with paths that may contain spaces.

# Installation destination used by `make install`.
prefix?= "$(PWD)/projects"
# Release flag forwarded to util/build.sh (presumably 1 = release build,
# 0 = debug — confirm in util/build.sh).
release?= 0
# Filter selecting which modules to build/install ("*" means everything).
only?= "*"
# Docker image tag used by `make image`.
tag?= "curvebs:unknown"
# Test-case filter; declared here but not referenced by any recipe below.
case?= "*"

# Print the list of buildable modules.
list:
	@bash util/build.sh --list

# Compile the modules selected by $(only).
build:
	@bash util/build.sh --only=$(only) --release=$(release)

# Install build artifacts for $(only) into $(prefix).
install:
	@bash util/install.sh --prefix=$(prefix) --only=$(only)

# Build a docker image tagged $(tag).
image:
	@bash util/image.sh $(tag)
220 changes: 220 additions & 0 deletions conf/chunkserver.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,220 @@
#
# Global settings
#
# Log level: INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_addr} __CURVEADM_TEMPLATE__
global.port=8200 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
global.subnet=127.0.0.0/24
global.enable_external_server=true
global.external_ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_external_addr} __CURVEADM_TEMPLATE__
global.external_subnet=127.0.0.0/24
# Chunk size, typically 16MB
global.chunk_size=16777216
# Chunk metadata page size, typically 4KB
global.meta_page_size=4096
# Maximum allowed location length for a clone chunk
global.location_limit=3000
# minimum alignment for io request
global.min_io_alignment=512

#
# MDS settings
#
# Multiple MDS addresses are supported, comma separated, e.g. 127.0.0.1:6666,127.0.0.1:7777
mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
# Maximum number of retries when registering with MDS
mds.register_retries=100
# RPC timeout when registering with MDS, typically 1000ms
mds.register_timeout=1000
# Interval between heartbeats sent to MDS, typically 10s
mds.heartbeat_interval=10
# RPC timeout for MDS heartbeats, typically 1000ms
# NOTE(review): value below is 5000 while the original comment states 1000 is
# typical — confirm the deviation is intended.
mds.heartbeat_timeout=5000

#
# Chunkserver settings
#
# Chunkserver root directory
chunkserver.stor_uri=local://./0/ # __CURVEADM_TEMPLATE__ local://${prefix}/data __CURVEADM_TEMPLATE__
# Chunkserver metadata file
chunkserver.meta_uri=local://./0/chunkserver.dat # __CURVEADM_TEMPLATE__ local://${prefix}/data/chunkserver.dat __CURVEADM_TEMPLATE__
# Disk type
chunkserver.disk_type=nvme
# Bandwidth cap for raft-internal install snapshot, typically 20MB
chunkserver.snapshot_throttle_throughput_bytes=20971520
# check cycles enables finer-grained bandwidth control. For example, with
# snapshotThroughputBytes=100MB and check cycles=10, the bandwidth within
# each 1/10 second is guaranteed to be 10MB and does not accumulate: the
# first 1/10s may use 10MB which then expires, so the second 1/10s can still
# only use 10MB, not 20MB.
chunkserver.snapshot_throttle_check_cycles=4

#
# Testing purpose settings
#
test.create_testcopyset=false
test.testcopyset_poolid=666
test.testcopyset_copysetid=888888
test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0

#
# Copyset settings
#
# Whether to check the raft term; normally enabled
copyset.check_term=true
# Whether to disable the raft configuration-change (CLI) service; normally not disabled
copyset.disable_cli=false
copyset.log_applied_task=false
# Raft election timeout, typically 5000ms
# NOTE(review): value below is 1000 — confirm the deviation from the stated
# typical value is intended.
copyset.election_timeout_ms=1000
# Raft snapshot interval, typically 1800s (i.e. 30 minutes)
copyset.snapshot_interval_s=1800
# When adding a node, the new node first copies data in a learner-like role.
# Once it lags the leader by no more than catchup_margin entries, the leader
# attempts to commit the configuration-change entry (generally a submitted
# entry will be committed & applied). A small catchup_margin makes it very
# likely the learner can join the replication group shortly afterwards.
copyset.catchup_margin=1000
# Copyset chunk data directory
copyset.chunk_data_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# Raft WAL log directory
copyset.raft_log_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# Raft metadata directory
copyset.raft_meta_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# Raft snapshot directory
copyset.raft_snapshot_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# Copyset recycle directory
copyset.recycler_uri=local://./0/recycler # __CURVEADM_TEMPLATE__ local://${prefix}/data/recycler __CURVEADM_TEMPLATE__
copyset.max_inflight_requests=5000
# Concurrency threshold for loading copysets at chunkserver startup; 0 means no limit
copyset.load_concurrency=10
# Maximum retries when the check for copyset load completion hits an exception
copyset.check_retrytimes=3
# A copyset is considered fully loaded when the gap between this peer's
# applied_index and the leader's committed_index is smaller than this value
copyset.finishload_margin=2000
# Sleep interval inside the loop that polls whether copysets finished loading
copyset.check_loadmargin_interval_ms=1000
# scan copyset interval
copyset.scan_interval_sec=5
# the size each scan 4MB
copyset.scan_size_byte=4194304
# the follower send scanmap to leader rpc timeout
copyset.scan_rpc_timeout_ms=1000
# the follower send scanmap to leader rpc retry times
copyset.scan_rpc_retry_times=3
# the follower send scanmap to leader rpc retry interval
copyset.scan_rpc_retry_interval_us=100000

#
# Clone settings
#
# Disable use of curveclient
clone.disable_curve_client=false
# Disable use of s3adapter
clone.disable_s3_adapter=false
# Clone slice size, typically 1MB
clone.slice_size=1048576
# Whether reading a clone chunk should paste the data to the local chunk.
# This option has no effect on recover-chunk requests.
clone.enable_paste=false
# Number of clone threads
clone.thread_num=10
# Clone queue depth
clone.queue_depth=6000
# Curve root username
curve.root_username=root
# Curve root password
curve.root_password=root_password
# Client configuration file
curve.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/cs_client.conf __CURVEADM_TEMPLATE__
# S3 configuration file
s3.config_path=conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __CURVEADM_TEMPLATE__
# Curve File time to live
curve.curve_file_timeout_s=30

#
# Local FileSystem settings
#
# Whether to use renameat2 (supported by ext4 since kernel 3.15)
fs.enable_renameat2=true

#
# metrics settings
# true means on, false means off
#
metric.onoff=true

#
# Storage engine settings
#
storeng.sync_write=false

#
# QoS settings
#

#
# Concurrent apply module
# Concurrency of the write threads in the concurrent-apply module, typically 10
wconcurrentapply.size=10
# Queue depth of the write threads in the concurrent-apply module
wconcurrentapply.queuedepth=1
# Concurrency of the read threads in the concurrent-apply module, typically 5
rconcurrentapply.size=5
# Queue depth of the read threads in the concurrent-apply module
rconcurrentapply.queuedepth=1

#
# Chunkfile pool
#
# Whether to fetch chunks from the chunkfilepool, normally true
chunkfilepool.enable_get_chunk_from_pool=true
# Chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__
# Path of the chunkfilepool meta file
chunkfilepool.meta_path=./chunkfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/chunkfilepool.meta __CURVEADM_TEMPLATE__
# Size of the chunkfilepool meta file
chunkfilepool.cpmeta_file_size=4096
# Maximum retries for getting a chunk from the chunkfilepool
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
# The bytes per write for cleaning chunk (max: 1MB)
chunkfilepool.clean.bytes_per_write=4096
# The throttle iops for cleaning chunk (4KB/IO)
chunkfilepool.clean.throttle_iops=500

#
# WAL file pool
#
# Whether the walpool shares the chunkfilepool; if true, the settings below take no effect
walfilepool.use_chunk_file_pool=true
# Whether to fetch segments from the walfilepool, normally true
walfilepool.enable_get_segment_from_pool=true
# Walpool directory
# NOTE(review): the curveadm template on this line points at
# ${prefix}/data/walfilepool.meta (a file) for a directory option — looks
# copy-pasted from meta_path below; confirm the intended path.
walfilepool.file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
# Path of the walpool meta file
walfilepool.meta_path=./walfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
# Walpool meta file size
# NOTE(review): the key is named segment_size (value 8MB) but the original
# comment described it as the meta file size — confirm which it actually is.
walfilepool.segment_size=8388608
# WAL metapage size
walfilepool.metapage_size=4096
# WAL filepool metadata file size
walfilepool.meta_file_size=4096
# Maximum retries for getting a chunk from the WAL filepool
walfilepool.retry_times=5

#
# trash settings
#
# Expiry time after which recycled chunkserver data is permanently deleted
trash.expire_afterSec=300
# Period at which the chunkserver checks recycled data for expiry
trash.scan_periodSec=120

# common option
#
# Directory for chunkserver logs
chunkserver.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
# For unit tests:
# chunkserver.common.logDir=./runlog/
2 changes: 1 addition & 1 deletion conf/client.conf
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ global.fileIOSplitMaxSizeKB=64
# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
# 设置log的路径
global.logPath=/data/log/curve/
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__
# 单元测试情况下
# logpath=./runlog/

Expand Down
4 changes: 2 additions & 2 deletions conf/cs_client.conf
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
#

# mds的地址信息,对于mds集群,地址以逗号隔开
mds.listen.addr=127.0.0.1:6666
mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__

# 初始化阶段向mds注册开关,默认为开
mds.registerToMDS=false
Expand Down Expand Up @@ -138,7 +138,7 @@ global.fileIOSplitMaxSizeKB=64
# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3
global.logLevel=0
# 设置log的路径
global.logPath=/data/log/curve/
global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
# 单元测试情况下
# logpath=./runlog/

Expand Down
113 changes: 113 additions & 0 deletions conf/etcd.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
# This is the configuration file for the etcd server.

# Human-readable name for this member.
name: # __CURVEADM_TEMPLATE__ etcd${service_host_sequence}${service_replica_sequence} __CURVEADM_TEMPLATE__

# Path to the data directory.
data-dir: # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__

# Path to the dedicated wal directory.
wal-dir: # __CURVEADM_TEMPLATE__ ${prefix}/data/wal __CURVEADM_TEMPLATE__

# Number of committed transactions to trigger a snapshot to disk.
snapshot-count: 10000

# Time (in milliseconds) of a heartbeat interval.
heartbeat-interval: 100

# Time (in milliseconds) for an election to timeout.
election-timeout: 1000
quota-backend-bytes: 0

# List of comma separated URLs to listen on for peer traffic.
listen-peer-urls: # __CURVEADM_TEMPLATE__ http://${service_addr}:${service_port} __CURVEADM_TEMPLATE__

# List of comma separated URLs to listen on for client traffic.
listen-client-urls: # __CURVEADM_TEMPLATE__ http://${service_addr}:${service_client_port} __CURVEADM_TEMPLATE__

# Maximum number of snapshot files to retain (0 is unlimited).
max-snapshots: 5

# Maximum number of wal files to retain (0 is unlimited).
max-wals: 5

# Comma-separated white list of origins for CORS (cross-origin resource sharing).
cors:

# List of this member's peer URLs to advertise to the rest of the cluster.
# The URLs need to be a comma-separated list.
initial-advertise-peer-urls: # __CURVEADM_TEMPLATE__ http://${service_addr}:${service_port} __CURVEADM_TEMPLATE__

# List of this member's client URLs to advertise to the public.
# The URLs need to be a comma-separated list.
advertise-client-urls: # __CURVEADM_TEMPLATE__ http://${service_addr}:${service_client_port} __CURVEADM_TEMPLATE__

# Discovery URL used to bootstrap the cluster.
discovery:

# Valid values include 'exit', 'proxy'
discovery-fallback: proxy

# HTTP proxy to use for traffic to discovery service.
discovery-proxy:

# DNS domain used to bootstrap initial cluster.
discovery-srv:

# Initial cluster configuration for bootstrapping.
initial-cluster: # __CURVEADM_TEMPLATE__ ${cluster_etcd_http_addr} __CURVEADM_TEMPLATE__

# Initial cluster token for the etcd cluster during bootstrap.
initial-cluster-token: etcd-cluster

# Initial cluster state ('new' or 'existing').
initial-cluster-state: new

# Reject reconfiguration requests that would cause quorum loss.
# NOTE(review): set to False, i.e. quorum-losing reconfigurations are
# permitted — confirm this is intentional for production clusters.
strict-reconfig-check: False

# Accept etcd V2 client requests
enable-v2: True

# Enable runtime profiling data via HTTP server
enable-pprof: True

# Valid values include 'on', 'readonly', 'off'
proxy: 'off'

# Time (in milliseconds) an endpoint will be held in a failed state.
proxy-failure-wait: 5000

# Time (in milliseconds) of the endpoints refresh interval.
proxy-refresh-interval: 30000

# Time (in milliseconds) for a dial to timeout.
proxy-dial-timeout: 1000

# Time (in milliseconds) for a write to timeout.
proxy-write-timeout: 5000

# Time (in milliseconds) for a read to timeout.
proxy-read-timeout: 0

# Enable debug-level logging for etcd.
debug: False

# Structured-logging backend.
logger: zap

# Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
log-outputs: [stderr]

# Force to create a new one member cluster.
force-new-cluster: False

# Periodic automatic history compaction; in 'periodic' mode the retention
# value is a duration (presumably hours when unitless — confirm against the
# etcd configuration docs).
auto-compaction-mode: periodic
auto-compaction-retention: "1"

# Set level of detail for exported metrics, specify 'extensive' to include histogram metrics.
metrics: extensive

# Enable to run an additional Raft election phase.
pre-vote: True

enable-grpc-gateway: True
Loading

0 comments on commit d7c6aed

Please sign in to comment.