kubectl.go
package cmd

import (
	"fmt"
	"os"
	"os/exec"
	"strings"

	"github.com/rancher/norman/clientbase"
	client "github.com/rancher/rancher/pkg/client/generated/management/v3"
	"github.com/urfave/cli"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/clientcmd/api"
)
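
// KubectlCommand defines the `kubectl` subcommand. Flag parsing is skipped so
// that every argument is forwarded to the kubectl binary unchanged.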
func KubectlCommand() cli.Command {
	return cli.Command{
		Name:            "kubectl",
		Usage:           "Run kubectl commands",
		Description:     "Use the current cluster context to run kubectl commands in the cluster",
		Action:          runKubectl,
		SkipFlagParsing: true,
	}
}
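
// runKubectl resolves a kubeconfig for the focused cluster and then runs the
// local kubectl binary against it, passing the CLI arguments through.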
func runKubectl(ctx *cli.Context) error {
	args := ctx.Args()
	if len(args) > 0 && (args[0] == "-h" || args[0] == "--help") {
		return cli.ShowCommandHelp(ctx, "kubectl")
	}
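
	// kubectl must be installed and on the PATH; fail early with a pointer to the install docs.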
	path, err := exec.LookPath("kubectl")
	if err != nil {
		return fmt.Errorf("kubectl is required to be set in your path to use this "+
			"command. See https://kubernetes.io/docs/tasks/tools/install-kubectl/ "+
			"for more info. Error: %s", err.Error())
	}
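
	// Resolve the Rancher management client and the CLI configuration.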
	c, err := GetClient(ctx)
	if err != nil {
		return err
	}

	config, err := loadConfig(ctx)
	if err != nil {
		return err
	}
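
	// Identify the focused Rancher server and the user who owns its access key.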
	currentRancherServer := config.FocusedServer()
	if currentRancherServer == nil {
		return fmt.Errorf("no focused server")
	}

	currentToken := currentRancherServer.AccessKey
	t, err := c.ManagementClient.Token.ByID(currentToken)
	if err != nil {
		return err
	}

	currentUser := t.UserID
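
	// Reuse a kubeconfig previously cached for this user, if one exists.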
	kubeConfig, err := getKubeConfigForUser(ctx, currentUser)
	if err != nil {
		return err
	}
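
	// If a cached kubeconfig was found, check whether its embedded token is still valid.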
	var isTokenValid bool
	if kubeConfig != nil {
		tokenID, err := extractKubeconfigTokenID(*kubeConfig)
		if err != nil {
			return err
		}
		isTokenValid, err = validateToken(tokenID, c.ManagementClient.Token)
		if err != nil {
			return err
		}
	}
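
	// With no cached kubeconfig, or an expired token, generate a fresh kubeconfig
	// for the focused cluster and cache it for later runs.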
	if kubeConfig == nil || !isTokenValid {
		cluster, err := getClusterByID(c, c.UserConfig.FocusedCluster())
		if err != nil {
			return err
		}

		config, err := c.ManagementClient.Cluster.ActionGenerateKubeconfig(cluster)
		if err != nil {
			return err
		}

		kubeConfigBytes := []byte(config.Config)
		kubeConfig, err = clientcmd.Load(kubeConfigBytes)
		if err != nil {
			return err
		}

		if err := setKubeConfigForUser(ctx, currentUser, kubeConfig); err != nil {
			return err
		}
	}
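
	// Write the kubeconfig to a temporary file that is removed when the command returns.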
	tmpfile, err := os.CreateTemp("", "rancher-")
	if err != nil {
		return err
	}
	defer os.Remove(tmpfile.Name())

	if err := clientcmd.WriteToFile(*kubeConfig, tmpfile.Name()); err != nil {
		return err
	}

	if err := tmpfile.Close(); err != nil {
		return err
	}
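
	// Run kubectl with the caller's arguments, pointing KUBECONFIG at the
	// temporary file and wiring it to the caller's stdio.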
	cmd := exec.Command(path, ctx.Args()...)
	cmd.Env = append(os.Environ(), "KUBECONFIG="+tmpfile.Name())
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin

	return cmd.Run()
}
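
// extractKubeconfigTokenID returns the token ID embedded in the kubeconfig's
// single user entry, whose token is stored as "<token-id>:<secret>".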
func extractKubeconfigTokenID(kubeconfig api.Config) (string, error) {
	if len(kubeconfig.AuthInfos) != 1 {
		return "", fmt.Errorf("invalid kubeconfig, expected to contain exactly 1 user")
	}

	var parts []string
	for _, val := range kubeconfig.AuthInfos {
		parts = strings.Split(val.Token, ":")
		if len(parts) != 2 {
			return "", fmt.Errorf("failed to parse kubeconfig token")
		}
	}

	return parts[0], nil
}
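
// validateToken reports whether the token still exists and has not expired.
// A "not found" response from the API is treated as an invalid token rather
// than an error.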
func validateToken(tokenID string, tokenClient client.TokenOperations) (bool, error) {
	token, err := tokenClient.ByID(tokenID)
	if err != nil {
		if !clientbase.IsNotFound(err) {
			return false, err
		}
		return false, nil
	}

	return !token.Expired, nil
}