Merge pull request labring#537 from oldthreefeng/fix_mispell
fix mispell of etcd
fanux authored Nov 30, 2020
2 parents 696c670 + cd3bbc8 commit 82cc023
Showing 5 changed files with 18 additions and 18 deletions.
4 changes: 2 additions & 2 deletions cmd/etcd.go
@@ -26,14 +26,14 @@ import (

var exampleCmd = `
# snapshot save the etcd; the backupPath is on the etcd nodes, not on the sealos init machine.
- sealos etcd save --name snapshot --backupPath /opt/sealos/ectd-backup
+ sealos etcd save --name snapshot --backupPath /opt/sealos/etcd-backup
# save the snapshot to an aliyun oss bucket. this is recommended for use in a cronjob
sealos etcd save --aliId youraliyunkeyid --aliKey youraliyunkeysecrets --ep oss-cn-hangzhou.aliyuncs.com \
--bucket etcdbackup --objectPath /sealos/ --docker
# restore etcd from the snapshot
- sealos etcd restore --name snapshot --backupPath /opt/sealos/ectd-backup
+ sealos etcd restore --name snapshot --backupPath /opt/sealos/etcd-backup
# etcd health check
sealos etcd health
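The example commands above are the usage string defined in cmd/etcd.go. As a purely illustrative sketch, assuming the CLI is wired with spf13/cobra (which the `cmd/` layout suggests) and using made-up names such as `saveCmd` and `defaultBackupDir`, this is roughly how a subcommand with the corrected `--backupPath` default could be registered; it is not the repository's actual wiring:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// Illustrative default, matching the corrected ETCDDEFAULTBACKUPDIR constant below.
const defaultBackupDir = "/opt/sealos/etcd-backup"

func main() {
	etcdCmd := &cobra.Command{Use: "etcd", Short: "save/restore/health-check etcd"}

	saveCmd := &cobra.Command{
		Use:     "save",
		Short:   "snapshot save etcd",
		Example: "sealos etcd save --name snapshot --backupPath " + defaultBackupDir,
		RunE: func(cmd *cobra.Command, args []string) error {
			name, _ := cmd.Flags().GetString("name")
			dir, _ := cmd.Flags().GetString("backupPath")
			fmt.Printf("would snapshot etcd to %s/%s\n", dir, name)
			return nil
		},
	}
	saveCmd.Flags().String("name", "snapshot", "Specify snapshot name")
	saveCmd.Flags().String("backupPath", defaultBackupDir, "Specify snapshot backup dir")

	etcdCmd.AddCommand(saveCmd)
	if err := etcdCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```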
24 changes: 12 additions & 12 deletions docs/etcdbackup.md
@@ -9,7 +9,7 @@ $ sealos etcd save -h
Flags:
--aliId string aliyun accessKeyId to save snapshot
--aliKey string aliyun accessKeySecrets to save snapshot
--backupPath string Specify snapshot backup dir (default "/opt/sealos/ectd-backup")
--backupPath string Specify snapshot backup dir (default "/opt/sealos/etcd-backup")
--bucket string oss bucketName to save snapshot
--docker snapshot your kubernetes etcd in container, will add unix timestamp to snapshot name
--ep string aliyun endpoints to save snapshot
@@ -22,7 +22,7 @@ Flags:

- `--aliId`: the Aliyun `accessKeyId`
- `--aliKey`: the Aliyun `accessKeySecrets`
- - `--backupPath`: the backup path on the host running sealos and on the master hosts. Defaults to `/opt/sealos/ectd-backup`
+ - `--backupPath`: the backup path on the host running sealos and on the master hosts. Defaults to `/opt/sealos/etcd-backup`
- `--bucket`: the Aliyun `oss bucketName`
- `--ep`: the Aliyun `oss endpoint`, e.g. `oss-cn-hangzhou.aliyuncs.com`
- `--name`: the name of the backup file; defaults to `snapshot`
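As a small, hypothetical Go sketch of how these save defaults combine into a snapshot path; the timestamp naming used for `--docker` is an assumption for illustration, not taken from the sealos source:

```go
package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func main() {
	backupPath := "/opt/sealos/etcd-backup" // --backupPath default
	name := "snapshot"                      // --name default
	docker := true                          // --docker

	// The help text says --docker adds a unix timestamp to the snapshot name;
	// the exact naming scheme below is an assumption.
	if docker {
		name = fmt.Sprintf("%s-%d", name, time.Now().Unix())
	}
	fmt.Println(filepath.Join(backupPath, name)) // e.g. /opt/sealos/etcd-backup/snapshot-1598866458
}
```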
@@ -197,11 +197,11 @@ Usage:
sealos etcd restore [flags]
Flags:
- --backupPath string Specify snapshot backup dir (default "/opt/sealos/ectd-backup")
+ --backupPath string Specify snapshot backup dir (default "/opt/sealos/etcd-backup")
-f, --force restore needs interactive confirmation
-h, --help help for restore
--name string Specify snapshot name (default "snapshot")
- --restorePath string Specify snapshot restore dir (default "/opt/sealos/ectd-restore")
+ --restorePath string Specify snapshot restore dir (default "/opt/sealos/etcd-restore")
Global Flags:
--config string config file (default is $HOME/.sealos/config.yaml)
@@ -211,9 +211,9 @@ Global Flags:

Option descriptions

- - `--backupPath`: the directory holding the backup. Defaults to the same path `save` uses, `/opt/sealos/ectd-backup`
+ - `--backupPath`: the directory holding the backup. Defaults to the same path `save` uses, `/opt/sealos/etcd-backup`

- - `--restorePath`: the restore directory. Defaults to `/opt/sealos/ectd-restore`, with the hostname appended so that multiple masters do not end up sharing the same restore directory.
+ - `--restorePath`: the restore directory. Defaults to `/opt/sealos/etcd-restore`, with the hostname appended so that multiple masters do not end up sharing the same restore directory.
- `-f, --force`: interactively confirm whether to perform the restore.
- `--name`: the name of the backup file. Defaults to the same name `save` uses, `snapshot`.

@@ -227,14 +227,14 @@ restore cmd will stop your kubernetes cluster immediately and restore etcd from
17:34:17 [INFO] [ssh.go:12] [ssh][192.168.160.243] hostname
17:34:17 [DEBG] [ssh.go:24] [ssh][192.168.160.243]command result is: dev-k8s-master
- 17:34:17 [INFO] [ssh.go:105] [ssh][192.168.160.243] cd /tmp && rm -rf /opt/sealos/ectd-restore-dev-k8s-master
+ 17:34:17 [INFO] [ssh.go:105] [ssh][192.168.160.243] cd /tmp && rm -rf /opt/sealos/etcd-restore-dev-k8s-master
17:34:17 [INFO] [ssh.go:12] [ssh][192.168.160.243] hostname
17:34:18 [DEBG] [ssh.go:24] [ssh][192.168.160.243]command result is: dev-k8s-master
{"level":"info","ts":1598866458.160008,"caller":"snapshot/v3_snapshot.go:296","msg":"restoring snapshot","path":"/opt/sealos/ectd-backup/snapshot","wal-dir":"/opt/sealos/ectd-restore-dev-k8s-master/member/wal","data-dir":"/opt/sealos/ectd-restore-dev-k8s-master","snap-dir":"/opt/sealos/ectd-restore-dev-k8s-master/member/snap"}
{"level":"info","ts":1598866458.160008,"caller":"snapshot/v3_snapshot.go:296","msg":"restoring snapshot","path":"/opt/sealos/etcd-backup/snapshot","wal-dir":"/opt/sealos/etcd-restore-dev-k8s-master/member/wal","data-dir":"/opt/sealos/etcd-restore-dev-k8s-master","snap-dir":"/opt/sealos/etcd-restore-dev-k8s-master/member/snap"}
{"level":"info","ts":1598866458.1982617,"caller":"mvcc/kvstore.go:380","msg":"restored last compact revision","meta-bucket-name":"meta","meta-bucket-name-key":"finishedCompactRev","restored-compact-revision":970469}
{"level":"info","ts":1598866458.2281547,"caller":"membership/cluster.go:392","msg":"added member","cluster-id":"d12074ddc55c9483","local-member-id":"0","added-peer-id":"5dfe17d3cf203a7e","added-peer-peer-urls":["https://192.168.160.243:2380"]}
{"level":"info","ts":1598866458.235216,"caller":"snapshot/v3_snapshot.go:309","msg":"restored snapshot","path":"/opt/sealos/ectd-backup/snapshot","wal-dir":"/opt/sealos/ectd-restore-dev-k8s-master/member/wal","data-dir":"/opt/sealos/ectd-restore-dev-k8s-master","snap-dir":"/opt/sealos/ectd-restore-dev-k8s-master/member/snap"}
{"level":"info","ts":1598866458.235216,"caller":"snapshot/v3_snapshot.go:309","msg":"restored snapshot","path":"/opt/sealos/etcd-backup/snapshot","wal-dir":"/opt/sealos/etcd-restore-dev-k8s-master/member/wal","data-dir":"/opt/sealos/etcd-restore-dev-k8s-master","snap-dir":"/opt/sealos/etcd-restore-dev-k8s-master/member/snap"}
17:34:28 [INFO] [ssh.go:105] [ssh][192.168.160.243] cd /tmp && mv /etc/kubernetes/manifests /etc/kubernetes/manifestslezSCljV
17:34:28 [INFO] [ssh.go:105] [ssh][192.168.160.243] cd /tmp && mv /var/lib/etcd /var/lib/etcdlezSCljV
17:34:38 [INFO] [etcd.go:136] send restore file to etcd master node and start etcd
@@ -244,19 +244,19 @@ restore cmd will stop your kubernetes cluster immediately and restore etcd from
17:34:39 [INFO] [etcd_restore.go:140] compress file
17:34:39 [INFO] [ssh.go:57] [ssh][192.168.160.243] mkdir -p /var/lib || true
17:34:39 [DEBG] [download.go:29] [192.168.160.243]please wait for mkDstDir
- 17:34:39 [INFO] [ssh.go:12] [ssh][192.168.160.243] ls -l /var/lib/ectd-restore-dev-k8s-master.tar 2>/dev/null |wc -l
+ 17:34:39 [INFO] [ssh.go:12] [ssh][192.168.160.243] ls -l /var/lib/etcd-restore-dev-k8s-master.tar 2>/dev/null |wc -l
17:34:39 [DEBG] [ssh.go:24] [ssh][192.168.160.243]command result is: 0
17:34:39 [DEBG] [scp.go:24] [ssh]source file md5 value is bc76f9bb1aea210fb815a43aed27aa29
17:34:40 [ALRT] [scp.go:98] [ssh][192.168.160.243]transfer total size is: 1244.01KB ;speed is 1MB
- 17:34:40 [INFO] [ssh.go:12] [ssh][192.168.160.243] md5sum /var/lib/ectd-restore-dev-k8s-master.tar | cut -d" " -f1
+ 17:34:40 [INFO] [ssh.go:12] [ssh][192.168.160.243] md5sum /var/lib/etcd-restore-dev-k8s-master.tar | cut -d" " -f1
17:34:40 [DEBG] [ssh.go:24] [ssh][192.168.160.243]command result is: bc76f9bb1aea210fb815a43aed27aa29
17:34:40 [DEBG] [scp.go:27] [ssh]host: 192.168.160.243 , remote md5: bc76f9bb1aea210fb815a43aed27aa29
17:34:40 [INFO] [scp.go:31] [ssh]md5 validate true
17:34:40 [INFO] [download.go:38] [192.168.160.243]copy file md5 validate success
17:34:40 [DEBG] [download.go:44] [192.168.160.243]please wait for after hook
- 17:34:40 [INFO] [ssh.go:57] [ssh][192.168.160.243] tar xf /var/lib/ectd-restore-dev-k8s-master.tar -C /var/lib/ && mv /var/lib/ectd-restore-dev-k8s-master /var/lib/etcd && rm -rf /var/lib/ectd-restore-dev-k8s-master.tar
+ 17:34:40 [INFO] [ssh.go:57] [ssh][192.168.160.243] tar xf /var/lib/etcd-restore-dev-k8s-master.tar -C /var/lib/ && mv /var/lib/etcd-restore-dev-k8s-master /var/lib/etcd && rm -rf /var/lib/etcd-restore-dev-k8s-master.tar
17:34:41 [INFO] [etcd.go:145] Start kube-apiserver kube-controller-manager kube-scheduler
17:34:41 [INFO] [ssh.go:105] [ssh][192.168.160.243] cd /tmp && mv /etc/kubernetes/manifestslezSCljV /etc/kubernetes/manifests
17:34:41 [INFO] [etcd.go:148] Wait 60s to health check for etcd
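The log above shows the restore flow: stop the static pod manifests, restore the snapshot into a per-host directory, move it to /var/lib/etcd, then restart the control plane. Below is a hedged sketch of the core restore step only, assuming the etcd v3.4 `clientv3/snapshot` package (the same code path as `snapshot/v3_snapshot.go` in the log); all values are illustrative, copied from the log rather than from the sealos source, and the cluster token is an assumption:

```go
package main

import (
	"log"

	"go.etcd.io/etcd/clientv3/snapshot"
	"go.uber.org/zap"
)

func main() {
	hostname := "dev-k8s-master" // per-host suffix, as in the log above
	restoreDir := "/opt/sealos/etcd-restore-" + hostname

	m := snapshot.NewV3(zap.NewExample())
	err := m.Restore(snapshot.RestoreConfig{
		SnapshotPath:        "/opt/sealos/etcd-backup/snapshot",
		Name:                hostname,
		OutputDataDir:       restoreDir,
		OutputWALDir:        restoreDir + "/member/wal",
		PeerURLs:            []string{"https://192.168.160.243:2380"},
		InitialCluster:      hostname + "=https://192.168.160.243:2380",
		InitialClusterToken: "etcd-cluster", // assumed; the real cluster token may differ
		SkipHashCheck:       false,
	})
	if err != nil {
		log.Fatalf("restore snapshot: %v", err)
	}
	// The restored directory would then replace /var/lib/etcd on the master,
	// which is what the tar/mv commands in the log above do.
}
```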
4 changes: 2 additions & 2 deletions install/constants.go
@@ -15,8 +15,8 @@ const (

// etcd backup
ETCDSNAPSHOTDEFAULTNAME = "snapshot"
ETCDDEFAULTBACKUPDIR = "/opt/sealos/ectd-backup"
ETCDDEFAULTRESTOREDIR = "/opt/sealos/ectd-restore"
ETCDDEFAULTBACKUPDIR = "/opt/sealos/etcd-backup"
ETCDDEFAULTRESTOREDIR = "/opt/sealos/etcd-restore"
ETCDDATADIR = "/var/lib/etcd"
EtcdCacart = "/root/.sealos/pki/etcd/ca.crt"
EtcdCert = "/root/.sealos/pki/etcd/healthcheck-client.crt"
2 changes: 1 addition & 1 deletion install/etcd_restore.go
@@ -128,7 +128,7 @@ func (e *EtcdFlags) AfterRestore() error {
// first to mv every
for _, host := range e.EtcdHosts {
hostname := SSHConfig.CmdToString(host, "hostname", "")
- // /opt/sealos/ectd-restore-dev-k8s-master
+ // /opt/sealos/etcd-restore-dev-k8s-master
location := fmt.Sprintf("%s-%s", e.RestoreDir, hostname)
//
tmpFile := fmt.Sprintf("/tmp/%s.tar", filepath.Base(location))
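For illustration, a tiny runnable sketch of what the two `fmt.Sprintf` calls above produce for one of the masters seen in the log; the input values are taken from the constants and log lines earlier in this diff:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	restoreDir := "/opt/sealos/etcd-restore" // e.RestoreDir (ETCDDEFAULTRESTOREDIR)
	hostname := "dev-k8s-master"             // `hostname` run over SSH on the etcd host

	location := fmt.Sprintf("%s-%s", restoreDir, hostname)
	tmpFile := fmt.Sprintf("/tmp/%s.tar", filepath.Base(location))

	fmt.Println(location) // /opt/sealos/etcd-restore-dev-k8s-master
	fmt.Println(tmpFile)  // /tmp/etcd-restore-dev-k8s-master.tar
}
```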
2 changes: 1 addition & 1 deletion install/etcd_save.go
@@ -179,7 +179,7 @@ func GetCfg(ep []string) (*clientv3.Config, error) {
return cfg, nil
}

- func GetEctdClient(ep []string) (*clientv3.Client, error) {
+ func GetEtcdClient(ep []string) (*clientv3.Client, error) {
var cfgtls *transport.TLSInfo
tlsinfo := transport.TLSInfo{}
tlsinfo.CertFile = EtcdCert
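The hunk ends before the rest of the function. As a hedged sketch only (not necessarily the actual body of `GetEtcdClient`), a TLS-secured etcd v3 client is typically built from a `transport.TLSInfo` like the one above; the key and CA paths below are assumptions that mirror the `EtcdCert`/`EtcdCacart` constants shown earlier, since the key file path is not visible in this diff:

```go
package main

import (
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/pkg/transport"
)

// Assumed paths mirroring install/constants.go; the key file path is not shown in the diff.
const (
	etcdCert   = "/root/.sealos/pki/etcd/healthcheck-client.crt"
	etcdKey    = "/root/.sealos/pki/etcd/healthcheck-client.key" // assumption
	etcdCacert = "/root/.sealos/pki/etcd/ca.crt"
)

func getEtcdClient(ep []string) (*clientv3.Client, error) {
	tlsinfo := transport.TLSInfo{
		CertFile:      etcdCert,
		KeyFile:       etcdKey,
		TrustedCAFile: etcdCacert,
	}
	tlsCfg, err := tlsinfo.ClientConfig()
	if err != nil {
		return nil, err
	}
	return clientv3.New(clientv3.Config{
		Endpoints:   ep,
		DialTimeout: 5 * time.Second,
		TLS:         tlsCfg,
	})
}

func main() {
	cli, err := getEtcdClient([]string{"https://192.168.160.243:2379"})
	if err != nil {
		fmt.Println("connect etcd:", err)
		return
	}
	defer cli.Close()
	fmt.Println("etcd client endpoints:", cli.Endpoints())
}
```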
