diff --git a/pkg/cli/upgradeassistant/cmd/migrate/1100.go b/pkg/cli/upgradeassistant/cmd/migrate/1100.go new file mode 100644 index 0000000000..2b64548237 --- /dev/null +++ b/pkg/cli/upgradeassistant/cmd/migrate/1100.go @@ -0,0 +1,160 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package migrate + +import ( + "context" + "fmt" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + + internalmodels "github.com/koderover/zadig/pkg/cli/upgradeassistant/internal/repository/models" + internalmongodb "github.com/koderover/zadig/pkg/cli/upgradeassistant/internal/repository/mongodb" + "github.com/koderover/zadig/pkg/cli/upgradeassistant/internal/upgradepath" + "github.com/koderover/zadig/pkg/tool/log" + "github.com/koderover/zadig/pkg/types" +) + +func init() { + upgradepath.RegisterHandler("1.9.0", "1.10.0", V190ToV1100) + upgradepath.RegisterHandler("1.10.0", "1.9.0", V1100ToV190) +} + +// V190ToV1100 migrates data `caches` and `pre_build.clean_workspace` fields in `zadig.module_build` and `zadig.module_testing` +// to new fields `cache_enable`, `cache_dir_type` and `cache_user_dir`. 
+func V190ToV1100() error { + log.Info("Migrate data from 1.9.0 to 1.10.0.") + + log.Info("Migrate data in `zadig.module_build`.") + if err := migrateModuleBuild(); err != nil { + return fmt.Errorf("failed to migrate data in `zadig.module_build`: %s", err) + } + + log.Info("Migrate data in `zadig.module_testing`.") + if err := migrateModuleTesting(); err != nil { + return fmt.Errorf("failed to migrate data in `zadig.module_testing`: %s", err) + } + + return nil +} + +// Since the old data has not been changed, no changes are required. +func V1100ToV190() error { + log.Info("Rollback data from 1.10.0 to 1.9.0") + return nil +} + +func migrateModuleBuild() error { + buildCol := internalmongodb.NewBuildColl() + + builds, err := buildCol.List(&internalmongodb.BuildListOption{}) + if err != nil { + return fmt.Errorf("failed to list all data in `zadig.module_build`: %s", err) + } + + var ms []mongo.WriteModel + for _, build := range builds { + if err := migrateOneBuild(build); err != nil { + log.Errorf("Failed to migrate data: %v. Err: %s", build, err) + continue + } + + ms = append(ms, + mongo.NewUpdateOneModel(). + SetFilter(bson.D{{"_id", build.ID}}). + SetUpdate(bson.D{{"$set", + bson.D{ + {"cache_enable", build.CacheEnable}, + {"cache_dir_type", build.CacheDirType}, + {"cache_user_dir", build.CacheUserDir}}}, + }), + ) + } + + _, err = buildCol.BulkWrite(context.TODO(), ms) + + return err +} + +func migrateModuleTesting() error { + testingCol := internalmongodb.NewTestingColl() + + testings, err := testingCol.List(&internalmongodb.ListTestOption{}) + if err != nil { + return fmt.Errorf("failed to list all data in `zadig.module_testing`: %s", err) + } + + var ms []mongo.WriteModel + for _, testing := range testings { + if err := migrateOneTesting(testing); err != nil { + log.Errorf("Failed to migrate data: %v. Err: %s", testing, err) + continue + } + + ms = append(ms, + mongo.NewUpdateOneModel(). + SetFilter(bson.D{{"_id", testing.ID}}). 
+ SetUpdate(bson.D{{"$set", + bson.D{ + {"cache_enable", testing.CacheEnable}, + {"cache_dir_type", testing.CacheDirType}, + {"cache_user_dir", testing.CacheUserDir}}}, + }), + ) + } + + _, err = testingCol.BulkWrite(context.TODO(), ms) + + return err +} + +func migrateOneBuild(build *internalmodels.Build) error { + if build.PreBuild != nil && build.PreBuild.CleanWorkspace { + return nil + } + + build.CacheEnable = true + + if len(build.Caches) == 0 { + build.CacheDirType = types.WorkspaceCacheDir + return nil + } + + build.CacheDirType = types.UserDefinedCacheDir + build.CacheUserDir = build.Caches[0] + + return nil +} + +func migrateOneTesting(testing *internalmodels.Testing) error { + if testing.PreTest != nil && testing.PreTest.CleanWorkspace { + return nil + } + + testing.CacheEnable = true + + if len(testing.Caches) == 0 { + testing.CacheDirType = types.WorkspaceCacheDir + return nil + } + + testing.CacheDirType = types.UserDefinedCacheDir + testing.CacheUserDir = testing.Caches[0] + + return nil +} diff --git a/pkg/cli/upgradeassistant/internal/repository/models/build.go b/pkg/cli/upgradeassistant/internal/repository/models/build.go new file mode 100644 index 0000000000..5e2d01e498 --- /dev/null +++ b/pkg/cli/upgradeassistant/internal/repository/models/build.go @@ -0,0 +1,46 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package models + +import ( + "go.mongodb.org/mongo-driver/bson/primitive" + + "github.com/koderover/zadig/pkg/types" +) + +type Build struct { + ID primitive.ObjectID `bson:"_id,omitempty" json:"id,omitempty"` + PreBuild *PreBuild `bson:"pre_build" json:"pre_build"` + + // TODO: Deprecated. + Caches []string `bson:"caches" json:"caches"` + + // New since V1.10.0. + CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` +} + +// PreBuild prepares an environment for a job +type PreBuild struct { + // TODO: Deprecated. + CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` +} + +func (Build) TableName() string { + return "module_build" +} diff --git a/pkg/cli/upgradeassistant/internal/repository/models/testing.go b/pkg/cli/upgradeassistant/internal/repository/models/testing.go new file mode 100644 index 0000000000..24d80f1ab4 --- /dev/null +++ b/pkg/cli/upgradeassistant/internal/repository/models/testing.go @@ -0,0 +1,46 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package models + +import ( + "go.mongodb.org/mongo-driver/bson/primitive" + + "github.com/koderover/zadig/pkg/types" +) + +type Testing struct { + ID primitive.ObjectID `bson:"_id,omitempty" json:"id,omitempty"` + PreTest *PreTest `bson:"pre_test" json:"pre_test"` + + // TODO: Deprecated. 
+ Caches []string `bson:"caches" json:"caches"` + + // New since V1.10.0. + CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` +} + +// PreTest prepares an environment for a job +type PreTest struct { + // TODO: Deprecated. + CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` +} + +func (Testing) TableName() string { + return "module_testing" +} diff --git a/pkg/cli/upgradeassistant/internal/repository/mongodb/build.go b/pkg/cli/upgradeassistant/internal/repository/mongodb/build.go new file mode 100644 index 0000000000..b181749d9b --- /dev/null +++ b/pkg/cli/upgradeassistant/internal/repository/mongodb/build.go @@ -0,0 +1,67 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mongodb + +import ( + "context" + "errors" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/koderover/zadig/pkg/cli/upgradeassistant/internal/repository/models" + "github.com/koderover/zadig/pkg/microservice/aslan/config" + mongotool "github.com/koderover/zadig/pkg/tool/mongo" +) + +type BuildListOption struct { +} + +type BuildColl struct { + *mongo.Collection + + coll string +} + +func NewBuildColl() *BuildColl { + name := models.Build{}.TableName() + return &BuildColl{Collection: mongotool.Database(config.MongoDatabase()).Collection(name), coll: name} +} + +func (c *BuildColl) List(opt *BuildListOption) ([]*models.Build, error) { + if opt == nil { + return nil, errors.New("nil ListOption") + } + + query := bson.M{} + + var resp []*models.Build + ctx := context.Background() + opts := options.Find() + cursor, err := c.Collection.Find(ctx, query, opts) + if err != nil { + return nil, err + } + + err = cursor.All(ctx, &resp) + if err != nil { + return nil, err + } + + return resp, nil +} diff --git a/pkg/cli/upgradeassistant/internal/repository/mongodb/testing.go b/pkg/cli/upgradeassistant/internal/repository/mongodb/testing.go new file mode 100644 index 0000000000..ee420afdf1 --- /dev/null +++ b/pkg/cli/upgradeassistant/internal/repository/mongodb/testing.go @@ -0,0 +1,66 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mongodb + +import ( + "context" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/koderover/zadig/pkg/cli/upgradeassistant/internal/repository/models" + "github.com/koderover/zadig/pkg/microservice/aslan/config" + mongotool "github.com/koderover/zadig/pkg/tool/mongo" +) + +type ListTestOption struct { +} + +type TestingColl struct { + *mongo.Collection + + coll string +} + +func NewTestingColl() *TestingColl { + name := models.Testing{}.TableName() + return &TestingColl{Collection: mongotool.Database(config.MongoDatabase()).Collection(name), coll: name} +} + +func (c *TestingColl) GetCollectionName() string { + return c.coll +} + +func (c *TestingColl) List(opt *ListTestOption) ([]*models.Testing, error) { + query := bson.M{} + + var resp []*models.Testing + ctx := context.Background() + opts := options.Find() + cursor, err := c.Collection.Find(ctx, query, opts) + if err != nil { + return nil, err + } + + err = cursor.All(ctx, &resp) + if err != nil { + return nil, err + } + + return resp, nil +} diff --git a/pkg/microservice/aslan/core/build/service/build.go b/pkg/microservice/aslan/core/build/service/build.go index 848247e030..d74ac8d550 100644 --- a/pkg/microservice/aslan/core/build/service/build.go +++ b/pkg/microservice/aslan/core/build/service/build.go @@ -109,7 +109,6 @@ func ListBuild(name, targets, productName string, log *zap.SugaredLogger) ([]*Bu } func CreateBuild(username string, build *commonmodels.Build, log *zap.SugaredLogger) error { - if len(build.Name) == 0 { return e.ErrCreateBuildModule.AddDesc("empty name") } @@ -154,7 +153,6 @@ func UpdateBuild(username string, build *commonmodels.Build, log *zap.SugaredLog } func DeleteBuild(name, productName string, log *zap.SugaredLogger) error { - if len(name) == 0 { return e.ErrDeleteBuildModule.AddDesc("empty name") } diff --git a/pkg/microservice/aslan/core/common/repository/models/build.go 
b/pkg/microservice/aslan/core/common/repository/models/build.go index c9d405e570..0456f17a20 100644 --- a/pkg/microservice/aslan/core/common/repository/models/build.go +++ b/pkg/microservice/aslan/core/common/repository/models/build.go @@ -32,23 +32,32 @@ type Build struct { // 在任一编译配置模板中只能出现一次 // 对于k8s部署是传入容器名称 // 对于物理机部署是服务名称 - Targets []*ServiceModuleTarget `bson:"targets" json:"targets"` - Description string `bson:"desc,omitempty" json:"desc"` - UpdateTime int64 `bson:"update_time" json:"update_time"` - UpdateBy string `bson:"update_by" json:"update_by"` - Repos []*types.Repository `bson:"repos" json:"repos"` - PreBuild *PreBuild `bson:"pre_build" json:"pre_build"` - JenkinsBuild *JenkinsBuild `bson:"jenkins_build,omitempty" json:"jenkins_build,omitempty"` - Scripts string `bson:"scripts" json:"scripts"` - PostBuild *PostBuild `bson:"post_build,omitempty" json:"post_build"` - Caches []string `bson:"caches" json:"caches"` - ProductName string `bson:"product_name" json:"product_name"` - SSHs []string `bson:"sshs,omitempty" json:"sshs,omitempty"` - PMDeployScripts string `bson:"pm_deploy_scripts" json:"pm_deploy_scripts"` + Targets []*ServiceModuleTarget `bson:"targets" json:"targets"` + Description string `bson:"desc,omitempty" json:"desc"` + UpdateTime int64 `bson:"update_time" json:"update_time"` + UpdateBy string `bson:"update_by" json:"update_by"` + Repos []*types.Repository `bson:"repos" json:"repos"` + PreBuild *PreBuild `bson:"pre_build" json:"pre_build"` + JenkinsBuild *JenkinsBuild `bson:"jenkins_build,omitempty" json:"jenkins_build,omitempty"` + Scripts string `bson:"scripts" json:"scripts"` + PostBuild *PostBuild `bson:"post_build,omitempty" json:"post_build"` + + // TODO: Deprecated. 
+ Caches []string `bson:"caches" json:"caches"` + + ProductName string `bson:"product_name" json:"product_name"` + SSHs []string `bson:"sshs,omitempty" json:"sshs,omitempty"` + PMDeployScripts string `bson:"pm_deploy_scripts" json:"pm_deploy_scripts"` + + // New since V1.10.0. + CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` } // PreBuild prepares an environment for a job type PreBuild struct { + // TODO: Deprecated. CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` // ResReq defines job requested resources ResReq setting.Request `bson:"res_req" json:"res_req"` @@ -68,6 +77,8 @@ type PreBuild struct { // UploadPkg uploads package to s3 UploadPkg bool `bson:"upload_pkg" json:"upload_pkg"` ClusterID string `bson:"cluster_id" json:"cluster_id"` + + // TODO: Deprecated. Namespace string `bson:"namespace" json:"namespace"` } diff --git a/pkg/microservice/aslan/core/common/repository/models/k8s_cluster.go b/pkg/microservice/aslan/core/common/repository/models/k8s_cluster.go index 337bd1b9d5..21043be5a6 100644 --- a/pkg/microservice/aslan/core/common/repository/models/k8s_cluster.go +++ b/pkg/microservice/aslan/core/common/repository/models/k8s_cluster.go @@ -21,6 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/koderover/zadig/pkg/setting" + "github.com/koderover/zadig/pkg/types" ) type K8SCluster struct { @@ -41,6 +42,7 @@ type K8SCluster struct { Token string `json:"token" bson:"-"` Provider int8 `json:"provider" bson:"provider"` Local bool `json:"local" bson:"local"` + Cache types.Cache `json:"cache" bson:"cache"` } type K8SClusterResp struct { diff --git a/pkg/microservice/aslan/core/common/repository/models/task/build.go b/pkg/microservice/aslan/core/common/repository/models/task/build.go index 0455c06e32..d5353efcbf 100644 --- 
a/pkg/microservice/aslan/core/common/repository/models/task/build.go +++ b/pkg/microservice/aslan/core/common/repository/models/task/build.go @@ -61,6 +61,12 @@ type Build struct { EnvHostInfo map[string][]string `bson:"env_host_info,omitempty" json:"env_host_info,omitempty"` ArtifactInfo *ArtifactInfo `bson:"artifact_info,omitempty" json:"artifact_info,omitempty"` ClusterID string `bson:"cluster_id,omitempty" json:"cluster_id,omitempty"` + + // New since V1.10.0. + Cache types.Cache `bson:"cache" json:"cache"` + CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` } type ArtifactInfo struct { @@ -123,9 +129,11 @@ type DockerBuildStatus struct { } type JobCtx struct { - EnableProxy bool `bson:"enable_proxy" json:"enable_proxy"` - Proxy *models.Proxy `bson:"proxy" json:"proxy"` - CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` + EnableProxy bool `bson:"enable_proxy" json:"enable_proxy"` + Proxy *models.Proxy `bson:"proxy" json:"proxy"` + + // TODO: Deprecated. 
+ CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` // BuildJobCtx Builds []*types.Repository `bson:"builds" json:"builds"` @@ -147,8 +155,7 @@ type JobCtx struct { DockerBuildCtx *DockerBuildCtx `bson:"docker_build_ctx,omitempty" json:"docker_build_ctx,omitempty"` FileArchiveCtx *FileArchiveCtx `bson:"file_archive_ctx,omitempty" json:"file_archive_ctx,omitempty"` // TestType - TestType string `bson:"test_type" json:"test_type"` - // Caches + TestType string `bson:"test_type" json:"test_type"` Caches []string `bson:"caches" json:"caches"` ArtifactPath string `bson:"artifact_path,omitempty" json:"artifact_path,omitempty"` ArtifactPaths []string `bson:"artifact_paths,omitempty" json:"artifact_paths,omitempty"` diff --git a/pkg/microservice/aslan/core/common/repository/models/task/testing.go b/pkg/microservice/aslan/core/common/repository/models/task/testing.go index 0105494f6e..cca8438aab 100644 --- a/pkg/microservice/aslan/core/common/repository/models/task/testing.go +++ b/pkg/microservice/aslan/core/common/repository/models/task/testing.go @@ -22,6 +22,7 @@ import ( "github.com/koderover/zadig/pkg/microservice/aslan/config" "github.com/koderover/zadig/pkg/microservice/aslan/core/common/repository/models" "github.com/koderover/zadig/pkg/setting" + "github.com/koderover/zadig/pkg/types" ) type Testing struct { @@ -49,6 +50,12 @@ type Testing struct { Registries []*models.RegistryNamespace `bson:"-" json:"registries"` ClusterID string `bson:"cluster_id,omitempty" json:"cluster_id,omitempty"` Namespace string `bson:"namespace" json:"namespace"` + + // New since V1.10.0. 
+ Cache types.Cache `bson:"cache" json:"cache"` + CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` } func (t *Testing) ToSubTask() (map[string]interface{}, error) { diff --git a/pkg/microservice/aslan/core/common/repository/models/testing.go b/pkg/microservice/aslan/core/common/repository/models/testing.go index ff0b9649b5..243b6f3546 100644 --- a/pkg/microservice/aslan/core/common/repository/models/testing.go +++ b/pkg/microservice/aslan/core/common/repository/models/testing.go @@ -38,10 +38,13 @@ type Testing struct { // Junit 测试报告 TestResultPath string `bson:"test_result_path" json:"test_result_path"` // html 测试报告 - TestReportPath string `bson:"test_report_path" json:"test_report_path"` - Threshold int `bson:"threshold" json:"threshold"` - TestType string `bson:"test_type" json:"test_type"` - Caches []string `bson:"caches" json:"caches"` + TestReportPath string `bson:"test_report_path" json:"test_report_path"` + Threshold int `bson:"threshold" json:"threshold"` + TestType string `bson:"test_type" json:"test_type"` + + // TODO: Deprecated. + Caches []string `bson:"caches" json:"caches"` + ArtifactPaths []string `bson:"artifact_paths,omitempty" json:"artifact_paths,omitempty"` TestCaseNum int `bson:"-" json:"test_case_num,omitempty"` ExecuteNum int `bson:"-" json:"execute_num,omitempty"` @@ -52,6 +55,11 @@ type Testing struct { HookCtl *TestingHookCtrl `bson:"hook_ctl" json:"hook_ctl"` NotifyCtl *NotifyCtl `bson:"notify_ctl,omitempty" json:"notify_ctl,omitempty"` ScheduleEnabled bool `bson:"schedule_enabled" json:"-"` + + // New since V1.10.0. 
+ CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` } type TestingHookCtrl struct { @@ -67,7 +75,9 @@ type TestingHook struct { // PreTest prepares an environment for a job type PreTest struct { + // TODO: Deprecated. CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` + // BuildOS defines job image OS, it supports 12.04, 14.04, 16.04 BuildOS string `bson:"build_os" json:"build_os"` ImageFrom string `bson:"image_from" json:"image_from"` @@ -82,7 +92,9 @@ type PreTest struct { // EnableProxy EnableProxy bool `bson:"enable_proxy" json:"enable_proxy"` ClusterID string `bson:"cluster_id" json:"cluster_id"` - Namespace string `bson:"namespace" json:"namespace"` + + // TODO: Deprecated. + Namespace string `bson:"namespace" json:"namespace"` } func (Testing) TableName() string { diff --git a/pkg/microservice/aslan/core/common/repository/mongodb/k8s_cluster.go b/pkg/microservice/aslan/core/common/repository/mongodb/k8s_cluster.go index 926eb912d8..c167026056 100644 --- a/pkg/microservice/aslan/core/common/repository/mongodb/k8s_cluster.go +++ b/pkg/microservice/aslan/core/common/repository/mongodb/k8s_cluster.go @@ -210,6 +210,7 @@ func (c *K8SClusterColl) UpdateMutableFields(cluster *models.K8SCluster, id stri "namespace": cluster.Namespace, "production": cluster.Production, "advanced_config": cluster.AdvancedConfig, + "cache": cluster.Cache, }}, ) diff --git a/pkg/microservice/aslan/core/common/repository/mongodb/s3.go b/pkg/microservice/aslan/core/common/repository/mongodb/s3.go index a79e666cbc..7b5f448abc 100644 --- a/pkg/microservice/aslan/core/common/repository/mongodb/s3.go +++ b/pkg/microservice/aslan/core/common/repository/mongodb/s3.go @@ -18,6 +18,8 @@ package mongodb import ( "context" + "fmt" + "strings" "time" "go.mongodb.org/mongo-driver/bson" @@ -27,6 +29,7 @@ import ( 
"github.com/koderover/zadig/pkg/microservice/aslan/config" "github.com/koderover/zadig/pkg/microservice/aslan/core/common/repository/models" "github.com/koderover/zadig/pkg/tool/crypto" + "github.com/koderover/zadig/pkg/tool/log" mongotool "github.com/koderover/zadig/pkg/tool/mongo" ) @@ -201,3 +204,46 @@ func (c *S3StorageColl) FindAll() ([]*models.S3Storage, error) { return storages, nil } + +func (c *S3StorageColl) InitData() error { + minioEndpoint := config.S3StorageEndpoint() + endpointInfo := strings.Split(minioEndpoint, ":") + if len(endpointInfo) != 2 { + return fmt.Errorf("invalid endpoint of minio: %s", minioEndpoint) + } + minioEndpoint = fmt.Sprintf("%s.%s.svc.cluster.local:%s", endpointInfo[0], config.Namespace(), endpointInfo[1]) + + // Check whether minio has been integrated. + err := c.FindOne(context.TODO(), bson.M{"endpoint": minioEndpoint}).Err() + if err == nil { + log.Infof("Has found %s.", minioEndpoint) + return nil + } + if err != mongo.ErrNoDocuments { + return fmt.Errorf("failed to operate on mongodb: %s", err) + } + + // Check whether there's default S3 system. + var setDefault bool + err = c.FindOne(context.TODO(), bson.M{"is_default": true}).Err() + if err != nil { + if err != mongo.ErrNoDocuments { + return fmt.Errorf("failed to operate on mongodb: %s", err) + } + + setDefault = true + } + + minioStorage := models.S3Storage{ + Ak: config.S3StorageAK(), + Sk: config.S3StorageSK(), + Endpoint: minioEndpoint, + Bucket: config.S3StorageBucket(), + IsDefault: setDefault, + Insecure: true, + Provider: 0, + } + log.Infof("Begin to integrate minio storage: %s", minioEndpoint) + + return c.Create(&minioStorage) +} diff --git a/pkg/microservice/aslan/core/multicluster/handler/cache.go b/pkg/microservice/aslan/core/multicluster/handler/cache.go new file mode 100644 index 0000000000..4b9e1ba09d --- /dev/null +++ b/pkg/microservice/aslan/core/multicluster/handler/cache.go @@ -0,0 +1,37 @@ +/* +Copyright 2022 The KodeRover Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "github.com/gin-gonic/gin" + "github.com/koderover/zadig/pkg/microservice/aslan/core/multicluster/service" + internalhandler "github.com/koderover/zadig/pkg/shared/handler" +) + +func ListStorageClasses(c *gin.Context) { + ctx := internalhandler.NewContext(c) + defer func() { internalhandler.JSONResponse(c, ctx) }() + + ctx.Resp, ctx.Err = service.ListStorageClasses(c, c.Param("id")) +} + +func ListPVCs(c *gin.Context) { + ctx := internalhandler.NewContext(c) + defer func() { internalhandler.JSONResponse(c, ctx) }() + + ctx.Resp, ctx.Err = service.ListPVCs(c, c.Param("id"), c.Param("namespace")) +} diff --git a/pkg/microservice/aslan/core/multicluster/handler/clusters.go b/pkg/microservice/aslan/core/multicluster/handler/clusters.go index 1c9f455434..1ae09c4c2d 100644 --- a/pkg/microservice/aslan/core/multicluster/handler/clusters.go +++ b/pkg/microservice/aslan/core/multicluster/handler/clusters.go @@ -25,6 +25,7 @@ import ( "github.com/koderover/zadig/pkg/microservice/aslan/core/multicluster/service" internalhandler "github.com/koderover/zadig/pkg/shared/handler" e "github.com/koderover/zadig/pkg/tool/errors" + "github.com/koderover/zadig/pkg/tool/log" ) func ListClusters(c *gin.Context) { @@ -75,11 +76,13 @@ func UpdateCluster(c *gin.Context) { args := new(service.K8SCluster) if err := c.BindJSON(args); err != nil { ctx.Err = e.ErrInvalidParam.AddErr(err) + log.Errorf("Failed to bind data: %s", err) 
return } if err := args.Clean(); err != nil { ctx.Err = e.ErrInvalidParam.AddErr(err) + log.Errorf("Failed to clean args: %s", err) return } diff --git a/pkg/microservice/aslan/core/multicluster/handler/router.go b/pkg/microservice/aslan/core/multicluster/handler/router.go index 22438e9552..4f02a1e830 100644 --- a/pkg/microservice/aslan/core/multicluster/handler/router.go +++ b/pkg/microservice/aslan/core/multicluster/handler/router.go @@ -45,4 +45,7 @@ func (*Router) Inject(router *gin.RouterGroup) { { bundles.GET("", GetBundleResources) } + + router.GET("/:id/storageclasses", ListStorageClasses) + router.GET("/:id/:namespace/pvcs", ListPVCs) } diff --git a/pkg/microservice/aslan/core/multicluster/service/cache.go b/pkg/microservice/aslan/core/multicluster/service/cache.go new file mode 100644 index 0000000000..52422b58ca --- /dev/null +++ b/pkg/microservice/aslan/core/multicluster/service/cache.go @@ -0,0 +1,111 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/pkg/microservice/aslan/config" + "github.com/koderover/zadig/pkg/setting" + kubeclient "github.com/koderover/zadig/pkg/shared/kube/client" + "github.com/koderover/zadig/pkg/tool/log" +) + +type PVC struct { + Name string `json:"name"` + StorageSizeInBytes int64 `json:"storage_size_in_bytes"` +} + +func ListStorageClasses(ctx context.Context, clusterID string) ([]string, error) { + kclient, err := kubeclient.GetKubeClient(config.HubServerAddress(), clusterID) + if err != nil { + return nil, fmt.Errorf("failed to get kube client: %s", err) + } + + var namespace string + switch clusterID { + case setting.LocalClusterID: + // For cluster-level resources, we need to explicitly configure the namespace to be empty. + namespace = "" + default: + // StorageClasses are cluster-scoped resources, so list them with an + // empty namespace for attached clusters as well. + namespace = "" 
+ } + + scList := &storagev1.StorageClassList{} + err = kclient.List(ctx, scList, client.InNamespace(namespace)) + if err != nil { + return nil, fmt.Errorf("failed to list storageclasses: %s", err) + } + log.Infof("StorageClass Length: %d", len(scList.Items)) + + storageclasses := make([]string, 0, len(scList.Items)) + for _, item := range scList.Items { + if !StorageProvisioner(item.Provisioner).IsNFS() { + continue + } + + storageclasses = append(storageclasses, item.Name) + } + + return storageclasses, nil +} + +func ListPVCs(ctx context.Context, clusterID, namespace string) ([]PVC, error) { + kclient, err := kubeclient.GetKubeClient(config.HubServerAddress(), clusterID) + if err != nil { + return nil, fmt.Errorf("failed to get kube client: %s", err) + } + + switch clusterID { + case setting.LocalClusterID: + // For now caller may not know exactly which namespace Zadig is deployed to, so a correction is made here. + namespace = config.Namespace() + default: + if namespace != AttachedClusterNamespace { + return nil, fmt.Errorf("invalid namespace in attached cluster: %s. Valid: %s", namespace, AttachedClusterNamespace) + } + } + + pvcList := &corev1.PersistentVolumeClaimList{} + err = kclient.List(ctx, pvcList, client.InNamespace(namespace)) + if err != nil { + return nil, err + } + log.Infof("PVC Length: %d", len(pvcList.Items)) + + pvcs := make([]PVC, 0, len(pvcList.Items)) + for _, item := range pvcList.Items { + // Don't support `corev1.PersistentVolumeBlock` because the PVC would be mounted on multiple nodes. 
+ if item.Spec.VolumeMode != nil && *item.Spec.VolumeMode == corev1.PersistentVolumeBlock { + continue + } + + pvcs = append(pvcs, PVC{ + Name: item.Name, + StorageSizeInBytes: item.Spec.Resources.Requests.Storage().Value(), + }) + } + + return pvcs, nil +} diff --git a/pkg/microservice/aslan/core/multicluster/service/clusters.go b/pkg/microservice/aslan/core/multicluster/service/clusters.go index b63832375c..d3b6e75f1f 100644 --- a/pkg/microservice/aslan/core/multicluster/service/clusters.go +++ b/pkg/microservice/aslan/core/multicluster/service/clusters.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "fmt" "net/http" "regexp" @@ -25,7 +26,11 @@ import ( "github.com/gin-gonic/gin" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" configbase "github.com/koderover/zadig/pkg/config" "github.com/koderover/zadig/pkg/microservice/aslan/config" @@ -33,7 +38,9 @@ import ( commonrepo "github.com/koderover/zadig/pkg/microservice/aslan/core/common/repository/mongodb" "github.com/koderover/zadig/pkg/microservice/aslan/core/common/service/kube" "github.com/koderover/zadig/pkg/setting" + kubeclient "github.com/koderover/zadig/pkg/shared/kube/client" e "github.com/koderover/zadig/pkg/tool/errors" + "github.com/koderover/zadig/pkg/types" ) var namePattern = regexp.MustCompile(`^[0-9a-zA-Z_.-]{1,32}$`) @@ -49,6 +56,7 @@ type K8SCluster struct { CreatedBy string `json:"createdBy"` Provider int8 `json:"provider"` Local bool `json:"local"` + Cache types.Cache `json:"cache"` } type AdvancedConfig struct { @@ -117,6 +125,7 @@ func ListClusters(ids []string, projectName string, logger *zap.SugaredLogger) ( Provider: c.Provider, Local: c.Local, AdvancedConfig: advancedConfig, + Cache: c.Cache, }) } @@ -219,11 +228,91 @@ func 
UpdateCluster(id string, args *K8SCluster, logger *zap.SugaredLogger) (*com } } } + + // If the user does not configure a cache, object storage is used by default. + if args.Cache.MediumType == "" { + args.Cache.MediumType = types.ObjectMedium + + defaultStorage, err := commonrepo.NewS3StorageColl().FindDefault() + if err != nil { + return nil, fmt.Errorf("failed to find default object storage: %s", err) + } + + args.Cache.ObjectProperties = types.ObjectProperties{ + ID: defaultStorage.ID.Hex(), + } + } + + // If the user chooses to use dynamically generated storage resources, the system automatically creates the PVC. + // TODO: If the PVC is not successfully bound to the PV, it is necessary to consider how to expose this abnormal information. + // Depends on product design. + if args.Cache.MediumType == types.NFSMedium && args.Cache.NFSProperties.ProvisionType == types.DynamicProvision { + kclient, err := kubeclient.GetKubeClient(config.HubServerAddress(), id) + if err != nil { + return nil, fmt.Errorf("failed to get kube client: %s", err) + } + + var namespace string + switch id { + case setting.LocalClusterID: + namespace = config.Namespace() + default: + namespace = AttachedClusterNamespace + } + + pvcName := fmt.Sprintf("cache-%s-%d", args.Cache.NFSProperties.StorageClass, args.Cache.NFSProperties.StorageSizeInGiB) + pvc := &corev1.PersistentVolumeClaim{} + err = kclient.Get(context.TODO(), client.ObjectKey{ + Name: pvcName, + Namespace: namespace, + }, pvc) + if err == nil { + logger.Infof("PVC %s exists in %s", pvcName, namespace) + args.Cache.NFSProperties.PVC = pvcName + } else if !apierrors.IsNotFound(err) { + return nil, fmt.Errorf("failed to find PVC %s in %s: %s", pvcName, namespace, err) + } else { + filesystemVolume := corev1.PersistentVolumeFilesystem + storageQuantity, err := resource.ParseQuantity(fmt.Sprintf("%dGi", args.Cache.NFSProperties.StorageSizeInGiB)) + if err != nil { + return nil, fmt.Errorf("failed to parse storage size: %d. 
err: %s", args.Cache.NFSProperties.StorageSizeInGiB, err) + } + + pvc = &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &args.Cache.NFSProperties.StorageClass, + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + VolumeMode: &filesystemVolume, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: storageQuantity, + }, + }, + }, + } + + err = kclient.Create(context.TODO(), pvc) + if err != nil { + return nil, fmt.Errorf("failed to create PVC %s in %s: %s", pvcName, namespace, err) + } + + logger.Infof("Successfully create PVC %s in %s", pvcName, namespace) + args.Cache.NFSProperties.PVC = pvcName + } + } + cluster := &commonmodels.K8SCluster{ Name: args.Name, Description: args.Description, AdvancedConfig: advancedConfig, Production: args.Production, + Cache: args.Cache, } return s.UpdateCluster(id, cluster, logger) } diff --git a/pkg/microservice/aslan/core/multicluster/service/constants.go b/pkg/microservice/aslan/core/multicluster/service/constants.go new file mode 100644 index 0000000000..341ca05cbb --- /dev/null +++ b/pkg/microservice/aslan/core/multicluster/service/constants.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import "strings" + +// AttachedClusterNamespace is the namespace Zadig uses in attached cluster. +// Note: **Restricted because of product design since v1.9.0**. +const AttachedClusterNamespace = "koderover-agent" + +// StorageProvisioner is a storage type +type StorageProvisioner string + +const ( + // AliCloudNASProvisioner is the provisioner of NFS storage in AliCloud. + AliCloudNASProvisioner StorageProvisioner = "nasplugin.csi.alibabacloud.com" + + // TencentCloudCFSProvisioner is the provisioner of NFS storage in TencentCloud. + TencentCloudCFSProvisioner StorageProvisioner = "com.tencent.cloud.csi.cfs" + + // HuaweiCloudSFSProvisioner is the provisioner of SFS storage in HuaweiCloud. + HuaweiCloudSFSProvisioner StorageProvisioner = "sfsturbo.csi.everest.io" + + // HuaweiCloudNASProvisioner is the provisioner of NAS storage in HuaweiCloud. + HuaweiCloudNASProvisioner StorageProvisioner = "nas.csi.everest.io" + + // AWSEFSProvisioner is the provisioner of EFS storage in AWS. + AWSEFSProvisioner StorageProvisioner = "efs.csi.aws.com" +) + +// Note: For the convenience of users, we filter the storage types for known cloud vendors to prevent users from mistakenly selecting storage +// types that do not support the ``ReadWriteMany`` policy. +// If the user builds their own storage, it is not checked because there is no clear information to judge. 
+func (s StorageProvisioner) IsNFS() bool { + if strings.Contains(string(s), "alibabacloud.com") && s != AliCloudNASProvisioner { + return false + } + if strings.Contains(string(s), "com.tencent.cloud") && s != TencentCloudCFSProvisioner { + return false + } + if strings.Contains(string(s), "everest.io") && !(s == HuaweiCloudSFSProvisioner || s == HuaweiCloudNASProvisioner) { + return false + } + if strings.Contains(string(s), "aws.com") && s != AWSEFSProvisioner { + return false + } + + return true +} diff --git a/pkg/microservice/aslan/core/service.go b/pkg/microservice/aslan/core/service.go index d06114f69d..63fbef62ae 100644 --- a/pkg/microservice/aslan/core/service.go +++ b/pkg/microservice/aslan/core/service.go @@ -226,6 +226,10 @@ func initDatabase() { // 初始化数据 commonrepo.NewInstallColl().InitInstallData(systemservice.InitInstallMap()) commonrepo.NewBasicImageColl().InitBasicImageData(systemservice.InitbasicImageInfos()) + + if err := commonrepo.NewS3StorageColl().InitData(); err != nil { + log.Warnf("Failed to init S3 data: %s", err) + } } type indexer interface { diff --git a/pkg/microservice/aslan/core/workflow/service/workflow/pipeline_task.go b/pkg/microservice/aslan/core/workflow/service/workflow/pipeline_task.go index a91e6512db..8da4cf68a0 100644 --- a/pkg/microservice/aslan/core/workflow/service/workflow/pipeline_task.go +++ b/pkg/microservice/aslan/core/workflow/service/workflow/pipeline_task.go @@ -635,6 +635,12 @@ func RestartPipelineTaskV2(userName string, taskID int64, pipelineName string, t func TestArgsToTestSubtask(args *commonmodels.TestTaskArgs, pt *task.Task, log *zap.SugaredLogger) (*task.Testing, error) { var resp *task.Testing + testTask := &task.Testing{ + TaskType: config.TaskTestingV2, + Enabled: true, + TestName: "test", + } + allTestings, err := commonrepo.NewTestingColl().List(&commonrepo.ListTestOption{ProductName: args.ProductName, TestType: ""}) if err != nil { log.Errorf("testArgsToTestSubtask TestingModule.List error: %v", 
err) @@ -657,6 +663,17 @@ func TestArgsToTestSubtask(args *commonmodels.TestTaskArgs, pt *task.Task, log * } testArg.TestModuleName = args.TestName + + testTask.CacheEnable = testing.CacheEnable + testTask.CacheDirType = testing.CacheDirType + testTask.CacheUserDir = testing.CacheUserDir + + clusterInfo, err := commonrepo.NewK8SClusterColl().Get(testing.PreTest.ClusterID) + if err != nil { + return resp, e.ErrListTestModule.AddDesc(err.Error()) + } + testTask.Cache = clusterInfo.Cache + break } } @@ -666,12 +683,7 @@ func TestArgsToTestSubtask(args *commonmodels.TestTaskArgs, pt *task.Task, log * log.Errorf("[%s]get TestingModule error: %v", args.TestName, err) return resp, err } - testTask := &task.Testing{ - TaskType: config.TaskTestingV2, - Enabled: true, - TestName: "test", - Timeout: testModule.Timeout, - } + testTask.Timeout = testModule.Timeout testTask.TestModuleName = testModule.Name testTask.JobCtx.TestType = testModule.TestType diff --git a/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task.go b/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task.go index a7e46d4b42..1d02a035cb 100644 --- a/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task.go +++ b/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task.go @@ -1852,6 +1852,16 @@ func BuildModuleToSubTasks(args *commonmodels.BuildModuleArgs, log *zap.SugaredL ClusterID: module.PreBuild.ClusterID, } + clusterInfo, err := commonrepo.NewK8SClusterColl().Get(module.PreBuild.ClusterID) + if err != nil { + return nil, e.ErrConvertSubTasks.AddErr(err) + } + + build.Cache = clusterInfo.Cache + build.CacheEnable = module.CacheEnable + build.CacheDirType = module.CacheDirType + build.CacheUserDir = module.CacheUserDir + if args.TaskType != "" { build.TaskType = config.TaskArtifactDeploy } diff --git a/pkg/microservice/aslan/core/workflow/testing/handler/test_task.go b/pkg/microservice/aslan/core/workflow/testing/handler/test_task.go index 
099cebcf98..ae15442849 100644 --- a/pkg/microservice/aslan/core/workflow/testing/handler/test_task.go +++ b/pkg/microservice/aslan/core/workflow/testing/handler/test_task.go @@ -62,7 +62,6 @@ func CreateTestTask(c *gin.Context) { } ctx.Resp, ctx.Err = service.CreateTestTask(args, ctx.Logger) - // 发送通知 if ctx.Err != nil { commonservice.SendFailedTaskMessage(ctx.UserName, args.ProductName, args.TestName, ctx.RequestID, config.TestType, ctx.Err, ctx.Logger) } diff --git a/pkg/microservice/aslan/core/workflow/testing/service/test_task.go b/pkg/microservice/aslan/core/workflow/testing/service/test_task.go index 30cbb7636e..af20608557 100644 --- a/pkg/microservice/aslan/core/workflow/testing/service/test_task.go +++ b/pkg/microservice/aslan/core/workflow/testing/service/test_task.go @@ -57,7 +57,6 @@ func CreateTestTask(args *commonmodels.TestTaskArgs, log *zap.SugaredLogger) (*C return nil, e.ErrGetCounter.AddDesc(err.Error()) } - // 获取全局configpayload configPayload := commonservice.GetConfigPayload(0) defaultS3Store, err := s3.FindDefaultS3() diff --git a/pkg/microservice/reaper/core/service/archive/file_archive.go b/pkg/microservice/reaper/core/service/archive/file_archive.go index 5857925a1f..6124542d5c 100644 --- a/pkg/microservice/reaper/core/service/archive/file_archive.go +++ b/pkg/microservice/reaper/core/service/archive/file_archive.go @@ -244,7 +244,6 @@ func (c *WorkspaceAchiever) Achieve(target string) ([]string, error) { } return c.paths, nil - } // The path contains shell variables, use linux's own ability to render diff --git a/pkg/microservice/reaper/core/service/meta/types.go b/pkg/microservice/reaper/core/service/meta/types.go index 574258ac84..60f4bd6152 100644 --- a/pkg/microservice/reaper/core/service/meta/types.go +++ b/pkg/microservice/reaper/core/service/meta/types.go @@ -25,15 +25,16 @@ import ( "github.com/koderover/zadig/pkg/microservice/reaper/config" "github.com/koderover/zadig/pkg/setting" + "github.com/koderover/zadig/pkg/types" ) -// 
Context ... type Context struct { // API token 服务访问使用的api token APIToken string `yaml:"api_token"` // Workspace 容器工作目录 [必填] Workspace string `yaml:"workspace"` + // TODO: Deprecated. // CleanWorkspace 是否清理工作目录 [选填, 默认为 false] CleanWorkspace bool `yaml:"clean_workspace"` @@ -117,6 +118,12 @@ type Context struct { ArtifactInfo *ArtifactInfo `yaml:"artifact_info"` ArtifactPath string `yaml:"artifact_path"` AesKey string `yaml:"aes_key"` + + // New since V1.10.0. + CacheEnable bool `yaml:"cache_enable"` + Cache types.Cache `yaml:"cache"` + CacheDirType types.CacheDirType `yaml:"cache_dir_type"` + CacheUserDir string `yaml:"cache_user_dir"` } type ArtifactInfo struct { diff --git a/pkg/microservice/reaper/core/service/reaper/git.go b/pkg/microservice/reaper/core/service/reaper/git.go index 01fdeed925..089c4cd3e2 100644 --- a/pkg/microservice/reaper/core/service/reaper/git.go +++ b/pkg/microservice/reaper/core/service/reaper/git.go @@ -162,7 +162,14 @@ func (r *Reaper) buildGitCommands(repo *meta.Repo) []*c.Command { } // 预防非正常退出导致git被锁住 - _ = os.Remove(path.Join(workDir, "/.git/index.lock")) + indexLockPath := path.Join(workDir, "/.git/index.lock") + if err := os.RemoveAll(indexLockPath); err != nil { + log.Errorf("Failed to remove %s: %s", indexLockPath, err) + } + shallowLockPath := path.Join(workDir, "/.git/shallow.lock") + if err := os.RemoveAll(shallowLockPath); err != nil { + log.Errorf("Failed to remove %s: %s", shallowLockPath, err) + } if isDirEmpty(filepath.Join(workDir, ".git")) { cmds = append(cmds, &c.Command{Cmd: c.InitGit(workDir)}) diff --git a/pkg/microservice/reaper/core/service/reaper/reaper.go b/pkg/microservice/reaper/core/service/reaper/reaper.go index 44f017b7e7..98251195a9 100644 --- a/pkg/microservice/reaper/core/service/reaper/reaper.go +++ b/pkg/microservice/reaper/core/service/reaper/reaper.go @@ -31,21 +31,18 @@ import ( "gopkg.in/yaml.v3" "github.com/koderover/zadig/pkg/microservice/reaper/config" - 
"github.com/koderover/zadig/pkg/microservice/reaper/core/service/archive" "github.com/koderover/zadig/pkg/microservice/reaper/core/service/meta" "github.com/koderover/zadig/pkg/setting" "github.com/koderover/zadig/pkg/tool/log" + "github.com/koderover/zadig/pkg/types" "github.com/koderover/zadig/pkg/util/fs" ) const ( - // ReadmeScriptFile ... ReadmeScriptFile = "readme_script.sh" - // ReadmeFile ... - ReadmeFile = "/tmp/README" + ReadmeFile = "/tmp/README" ) -// Reaper ... type Reaper struct { Ctx *meta.Context StartTime time.Time @@ -82,33 +79,23 @@ func (r *Reaper) GetCacheFile() string { return filepath.Join(r.Ctx.Workspace, "reaper.tar.gz") } -func (r *Reaper) archiveCustomCaches(wd, dest string, caches []string) ([]string, error) { - fileAchiever := archive.NewWorkspaceAchiever(r.Ctx.StorageURI, r.Ctx.PipelineName, r.Ctx.ServiceName, wd, r.Ctx.AesKey, caches, []string{}, r.getUserEnvs()) - - // list files matches caches - return fileAchiever.Achieve(dest) -} - func (r *Reaper) CompressCache(storageURI string) error { err := r.EnsureActiveWorkspace(r.ActiveWorkspace) if err != nil { log.Errorf("EnsureActiveWorkspace err:%v", err) return err } - if len(r.Ctx.Caches) > 0 { - log.Infof("custom caches will be cached") - caches, err := r.archiveCustomCaches(r.ActiveWorkspace, r.GetCacheFile(), r.Ctx.Caches) - if err != nil { - return err - } - log.Infof("succeed to cache [%s]", strings.Join(caches, ",")) - } else { - log.Infof("workspace will be cached in background") - if err := r.cm.Archive(r.ActiveWorkspace, r.GetCacheFile()); err != nil { - return err - } - log.Info("succeed to cache workspace") + + cacheDir := "/workspace" + if r.Ctx.CacheDirType == types.UserDefinedCacheDir { + cacheDir = r.Ctx.CacheUserDir + } + + log.Infof("Data in `%s` will be cached.", cacheDir) + if err := r.cm.Archive(cacheDir, r.GetCacheFile()); err != nil { + return fmt.Errorf("failed to cache %s: %s", cacheDir, err) } + log.Infof("Succeed to cache %s", cacheDir) // remove workspace 
err = os.RemoveAll(r.ActiveWorkspace) @@ -141,28 +128,23 @@ func (r *Reaper) EnsureActiveWorkspace(workspace string) error { r.ActiveWorkspace = tempWorkspace return os.Chdir(r.ActiveWorkspace) } + err := os.MkdirAll(workspace, os.ModePerm) if err != nil { return fmt.Errorf("failed to create workspace: %v", err) } r.ActiveWorkspace = workspace + return os.Chdir(r.ActiveWorkspace) } -// BeforeExec ... func (r *Reaper) BeforeExec() error { - workspace := "/workspace" + r.StartTime = time.Now() + workspace := "/workspace" if r.Ctx.ClassicBuild { workspace = r.Ctx.Workspace } - - r.StartTime = time.Now() - - if err := os.RemoveAll(workspace); err != nil { - log.Warning(err.Error()) - } - if err := r.EnsureActiveWorkspace(workspace); err != nil { return err } @@ -175,7 +157,6 @@ func (r *Reaper) BeforeExec() error { time.Sleep(time.Second * 1) } - // 检查是否需要登录docker registry if r.Ctx.DockerRegistry != nil { if r.Ctx.DockerRegistry.UserName != "" { log.Infof("login docker registry %s", r.Ctx.DockerRegistry.Host) @@ -190,39 +171,26 @@ func (r *Reaper) BeforeExec() error { } } - // CleanWorkspace=True 意思是不使用缓存,ResetCache=True 意思是当次工作流不使用缓存 - // 如果 CleanWorkspace=True,永远不使用缓存 - // 如果 CleanWorkspace=False,本次工作流 ResetCache=False,使用缓存;本次工作流 ResetCache=True,不使用缓存 - // TODO: CleanWorkspace 和 ResetCache 严重词不达意,需要改成更合理的值 - if !r.Ctx.CleanWorkspace && !r.Ctx.ResetCache { - // 恢复缓存 - //if _, err := os.Stat(r.GetCacheFile()); err == nil { - // 解压缓存 + if r.Ctx.CacheEnable && r.Ctx.Cache.MediumType == types.ObjectMedium { log.Info("extracting workspace ...") if err := r.DecompressCache(); err != nil { - log.Infof("no previous cache is found: %v", err) - //if err = os.Remove(r.GetCacheFile()); err != nil { - // log.Warningf("failed to remove cache file %s: %v", r.GetCacheFile(), err) - //} + // If the workflow runs for the first time, there may be no cache. 
+ log.Infof("no previous cache is found: %s", err) } else { log.Info("succeed to extract workspace") } - //} } - // 创建SSH目录 if err := os.MkdirAll(path.Join(os.Getenv("HOME"), "/.ssh"), os.ModePerm); err != nil { return fmt.Errorf("create ssh folder error: %v", err) } - // 创建发布目录 if r.Ctx.Archive != nil && len(r.Ctx.Archive.Dir) > 0 { if err := os.MkdirAll(r.Ctx.Archive.Dir, os.ModePerm); err != nil { return fmt.Errorf("create DistDir error: %v", err) } } - // 检查是否需要配置Gitub/Gitlab if r.Ctx.Git != nil { if err := r.Ctx.Git.WriteGithubSSHFile(); err != nil { return fmt.Errorf("write github ssh file error: %v", err) @@ -241,14 +209,13 @@ func (r *Reaper) BeforeExec() error { } } - // 清理测试目录 if r.Ctx.GinkgoTest != nil && len(r.Ctx.GinkgoTest.ResultPath) > 0 { r.Ctx.GinkgoTest.ResultPath = filepath.Join(r.ActiveWorkspace, r.Ctx.GinkgoTest.ResultPath) log.Infof("clean test result path %s", r.Ctx.GinkgoTest.ResultPath) if err := os.RemoveAll(r.Ctx.GinkgoTest.ResultPath); err != nil { log.Warning(err.Error()) } - // 创建测试目录 + if err := os.MkdirAll(r.Ctx.GinkgoTest.ResultPath, os.ModePerm); err != nil { return fmt.Errorf("create test result path error: %v", err) } @@ -342,25 +309,19 @@ func (r *Reaper) prepareDockerfile() error { return nil } -// Exec ... func (r *Reaper) Exec() error { - - // 运行安装脚本 if err := r.runIntallationScripts(); err != nil { return err } - // 运行Git命令 if err := r.runGitCmds(); err != nil { return err } - // 生成Git commits信息 if err := r.createReadme(ReadmeFile); err != nil { log.Warningf("create readme file error: %v", err) } - // 运行用户脚本 if err := r.runScripts(); err != nil { return err } @@ -450,10 +411,10 @@ func (r *Reaper) AfterExec(upStreamErr error) error { return err } - // Upload workspace cache if user uses workspace cache. + // Upload workspace cache if the user turns on caching and uses object storage. 
// Note: Whether the cache is uploaded successfully or not cannot hinder the progress of the overall process, // so only exceptions are printed here and the process is not interrupted. - if !r.Ctx.CleanWorkspace && !r.Ctx.ResetCache { + if r.Ctx.CacheEnable && r.Ctx.Cache.MediumType == types.ObjectMedium { if err := r.CompressCache(r.Ctx.StorageURI); err != nil { log.Warnf("Failed to run compress cache: %s", err) } diff --git a/pkg/microservice/reaper/core/service/reaper/script.go b/pkg/microservice/reaper/core/service/reaper/script.go index dcc880102a..be31a94689 100644 --- a/pkg/microservice/reaper/core/service/reaper/script.go +++ b/pkg/microservice/reaper/core/service/reaper/script.go @@ -150,7 +150,6 @@ func (r *Reaper) runIntallationScripts() error { } func (r *Reaper) createReadme(file string) error { - if r.Ctx.Archive == nil || len(r.Ctx.Repos) == 0 { return nil } diff --git a/pkg/microservice/warpdrive/core/service/taskcontroller/task_handler.go b/pkg/microservice/warpdrive/core/service/taskcontroller/task_handler.go index 7ef555b129..5a8126861b 100644 --- a/pkg/microservice/warpdrive/core/service/taskcontroller/task_handler.go +++ b/pkg/microservice/warpdrive/core/service/taskcontroller/task_handler.go @@ -45,7 +45,6 @@ var ( xl *zap.SugaredLogger ) -// ExecHandler ... // Sender: sender to send ack/notification // TaskPlugins: registered task plugin initiators to initiate specific plugin to execute task type ExecHandler struct { @@ -53,10 +52,8 @@ type ExecHandler struct { TaskPlugins map[config.TaskType]plugins.Initiator } -// CancelHandler ... type CancelHandler struct{} -// HandleMessage ... 
// Message handler to handle task execution message func (h *ExecHandler) HandleMessage(message *nsq.Message) error { defer func() { @@ -523,7 +520,6 @@ func (h *ExecHandler) executeTask(taskCtx context.Context, plugin plugins.TaskPl } func Logger(pipelineTask *task.Task) *zap.SugaredLogger { - // 初始化Logger l := log.Logger() if pipelineTask != nil { l.With(zap.String(setting.RequestID, pipelineTask.ReqID)) diff --git a/pkg/microservice/warpdrive/core/service/taskplugin/build.go b/pkg/microservice/warpdrive/core/service/taskplugin/build.go index a3a625e132..9594a2abf1 100644 --- a/pkg/microservice/warpdrive/core/service/taskplugin/build.go +++ b/pkg/microservice/warpdrive/core/service/taskplugin/build.go @@ -38,8 +38,7 @@ import ( ) const ( - // BuildTaskV2Timeout ... - BuildTaskV2Timeout = 60 * 60 * 3 // 60 minutes + BuildTaskV2Timeout = 60 * 60 * 3 // 180 minutes ) // InitializeBuildTaskPlugin to initialize build task plugin, and return reference @@ -67,7 +66,6 @@ func (p *BuildTaskPlugin) SetAckFunc(ack func()) { p.ack = ack } -// Init ... func (p *BuildTaskPlugin) Init(jobname, filename string, xl *zap.SugaredLogger) { p.JobName = jobname p.Log = xl @@ -78,17 +76,14 @@ func (p *BuildTaskPlugin) Type() config.TaskType { return p.Name } -// Status ... func (p *BuildTaskPlugin) Status() config.Status { return p.Task.TaskStatus } -// SetStatus ... func (p *BuildTaskPlugin) SetStatus(status config.Status) { p.Task.TaskStatus = status } -// TaskTimeout ... 
func (p *BuildTaskPlugin) TaskTimeout() int { if p.Task.Timeout == 0 { p.Task.Timeout = BuildTaskV2Timeout @@ -107,6 +102,17 @@ func (p *BuildTaskPlugin) SetBuildStatusCompleted(status config.Status) { //TODO: Binded Archive File logic func (p *BuildTaskPlugin) Run(ctx context.Context, pipelineTask *task.Task, pipelineCtx *task.PipelineCtx, serviceName string) { + if p.Task.CacheEnable && !pipelineTask.ConfigPayload.ResetCache { + pipelineCtx.CacheEnable = true + pipelineCtx.Cache = p.Task.Cache + pipelineCtx.CacheDirType = p.Task.CacheDirType + pipelineCtx.CacheUserDir = p.Task.CacheUserDir + } else { + pipelineCtx.CacheEnable = false + } + + // TODO: Since the namespace field has been used continuously since v1.10.0, the processing logic related to namespace needs to + // be deleted in v1.11.0. p.KubeNamespace = pipelineTask.ConfigPayload.Build.KubeNamespace if p.Task.Namespace != "" { p.KubeNamespace = p.Task.Namespace @@ -304,7 +310,6 @@ func (p *BuildTaskPlugin) Run(ctx context.Context, pipelineTask *task.Task, pipe p.Log.Infof("succeed to create build job %s", p.JobName) } -// Wait ... func (p *BuildTaskPlugin) Wait(ctx context.Context) { status := waitJobEndWithFile(ctx, p.TaskTimeout(), p.KubeNamespace, p.JobName, true, p.kubeClient, p.Log) p.SetBuildStatusCompleted(status) @@ -334,7 +339,6 @@ func (p *BuildTaskPlugin) Wait(ctx context.Context) { p.SetStatus(status) } -// Complete ... func (p *BuildTaskPlugin) Complete(ctx context.Context, pipelineTask *task.Task, serviceName string) { jobLabel := &JobLabel{ PipelineName: pipelineTask.PipelineName, @@ -369,22 +373,20 @@ func (p *BuildTaskPlugin) Complete(ctx context.Context, pipelineTask *task.Task, p.Task.LogFile = p.FileName } -// SetTask ... func (p *BuildTaskPlugin) SetTask(t map[string]interface{}) error { task, err := ToBuildTask(t) if err != nil { return err } p.Task = task + return nil } -// GetTask ... func (p *BuildTaskPlugin) GetTask() interface{} { return p.Task } -// IsTaskDone ... 
func (p *BuildTaskPlugin) IsTaskDone() bool { if p.Task.TaskStatus != config.StatusCreated && p.Task.TaskStatus != config.StatusRunning { return true @@ -392,7 +394,6 @@ func (p *BuildTaskPlugin) IsTaskDone() bool { return false } -// IsTaskFailed ... func (p *BuildTaskPlugin) IsTaskFailed() bool { if p.Task.TaskStatus == config.StatusFailed || p.Task.TaskStatus == config.StatusTimeout || p.Task.TaskStatus == config.StatusCancelled { return true @@ -400,22 +401,18 @@ func (p *BuildTaskPlugin) IsTaskFailed() bool { return false } -// SetStartTime ... func (p *BuildTaskPlugin) SetStartTime() { p.Task.StartTime = time.Now().Unix() } -// SetEndTime ... func (p *BuildTaskPlugin) SetEndTime() { p.Task.EndTime = time.Now().Unix() } -// IsTaskEnabled ... func (p *BuildTaskPlugin) IsTaskEnabled() bool { return p.Task.Enabled } -// ResetError ... func (p *BuildTaskPlugin) ResetError() { p.Task.Error = "" } diff --git a/pkg/microservice/warpdrive/core/service/taskplugin/job.go b/pkg/microservice/warpdrive/core/service/taskplugin/job.go index 50b2099fbe..cf8505b597 100644 --- a/pkg/microservice/warpdrive/core/service/taskplugin/job.go +++ b/pkg/microservice/warpdrive/core/service/taskplugin/job.go @@ -52,6 +52,7 @@ import ( "github.com/koderover/zadig/pkg/tool/kube/updater" "github.com/koderover/zadig/pkg/tool/log" s3tool "github.com/koderover/zadig/pkg/tool/s3" + commontypes "github.com/koderover/zadig/pkg/types" "github.com/koderover/zadig/pkg/util" ) @@ -166,7 +167,6 @@ func saveContainerLog(pipelineTask *task.Task, namespace, clusterID, fileName st return nil } -// JobCtxBuilder ... 
type JobCtxBuilder struct { JobName string ArchiveFile string @@ -187,13 +187,12 @@ func replaceWrapLine(script string) string { // BuildReaperContext builds a yaml func (b *JobCtxBuilder) BuildReaperContext(pipelineTask *task.Task, serviceName string) *types.Context { - ctx := &types.Context{ APIToken: pipelineTask.ConfigPayload.APIToken, Workspace: b.PipelineCtx.Workspace, CleanWorkspace: b.JobCtx.CleanWorkspace, IgnoreCache: pipelineTask.ConfigPayload.IgnoreCache, - ResetCache: pipelineTask.ConfigPayload.ResetCache, + // ResetCache: pipelineTask.ConfigPayload.ResetCache, Proxy: &types.Proxy{ Type: pipelineTask.ConfigPayload.Proxy.Type, Address: pipelineTask.ConfigPayload.Proxy.Address, @@ -226,6 +225,14 @@ func (b *JobCtxBuilder) BuildReaperContext(pipelineTask *task.Task, serviceName StorageEndpoint: pipelineTask.StorageEndpoint, AesKey: pipelineTask.ConfigPayload.AesKey, } + + if b.PipelineCtx.CacheEnable && !pipelineTask.ConfigPayload.ResetCache { + ctx.CacheEnable = true + ctx.Cache = b.PipelineCtx.Cache + ctx.CacheDirType = b.PipelineCtx.CacheDirType + ctx.CacheUserDir = b.PipelineCtx.CacheUserDir + } + for _, install := range b.Installs { inst := &types.Install{ // TODO: 之后可以适配 install.Scripts 为[]string @@ -522,6 +529,28 @@ func buildJobWithLinkedNs(taskType config.TaskType, jobImage, jobName, serviceNa }, } + if ctx.CacheEnable && ctx.Cache.MediumType == commontypes.NFSMedium { + volumeName := "build-cache" + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: ctx.Cache.NFSProperties.PVC, + }, + }, + }) + + mountPath := ctx.CacheUserDir + if ctx.CacheDirType == commontypes.WorkspaceCacheDir { + mountPath = "/workspace" + } + + job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + 
MountPath: mountPath, + }) + } + if !strings.Contains(jobImage, PredatorPlugin) && !strings.Contains(jobImage, JenkinsPlugin) && !strings.Contains(jobImage, PackagerPlugin) { job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c"} job.Spec.Template.Spec.Containers[0].Args = []string{reaperBootingScript} diff --git a/pkg/microservice/warpdrive/core/service/taskplugin/testing.go b/pkg/microservice/warpdrive/core/service/taskplugin/testing.go index 5afb0e7a8c..b6e4ac04d2 100644 --- a/pkg/microservice/warpdrive/core/service/taskplugin/testing.go +++ b/pkg/microservice/warpdrive/core/service/taskplugin/testing.go @@ -41,7 +41,6 @@ import ( "github.com/koderover/zadig/pkg/util" ) -//InitializeTestTaskPlugin ... func InitializeTestTaskPlugin(taskType config.TaskType) TaskPlugin { return &TestPlugin{ Name: taskType, @@ -64,34 +63,27 @@ func (p *TestPlugin) SetAckFunc(func()) { } const ( - // TestingV2TaskTimeout ... TestingV2TaskTimeout = 60 * 60 // 60 minutes ) -// Init ... func (p *TestPlugin) Init(jobname, filename string, xl *zap.SugaredLogger) { p.JobName = jobname p.FileName = filename - // SetLogger ... p.Log = xl } -// Type ... func (p *TestPlugin) Type() config.TaskType { return p.Name } -// Status ... func (p *TestPlugin) Status() config.Status { return p.Task.TaskStatus } -// SetStatus ... func (p *TestPlugin) SetStatus(status config.Status) { p.Task.TaskStatus = status } -// TaskTimeout ... 
func (p *TestPlugin) TaskTimeout() int { if p.Task.Timeout == 0 { p.Task.Timeout = TestingV2TaskTimeout @@ -104,6 +96,17 @@ func (p *TestPlugin) TaskTimeout() int { } func (p *TestPlugin) Run(ctx context.Context, pipelineTask *task.Task, pipelineCtx *task.PipelineCtx, serviceName string) { + if p.Task.CacheEnable && !pipelineTask.ConfigPayload.ResetCache { + pipelineCtx.CacheEnable = true + pipelineCtx.Cache = p.Task.Cache + pipelineCtx.CacheDirType = p.Task.CacheDirType + pipelineCtx.CacheUserDir = p.Task.CacheUserDir + } else { + pipelineCtx.CacheEnable = false + } + + // TODO: Since the namespace field has been used continuously since v1.10.0, the processing logic related to namespace needs to + // be deleted in v1.11.0. p.KubeNamespace = pipelineTask.ConfigPayload.Test.KubeNamespace if p.Task.Namespace != "" { p.KubeNamespace = p.Task.Namespace @@ -134,9 +137,8 @@ func (p *TestPlugin) Run(ctx context.Context, pipelineTask *task.Task, pipelineC } } pipelineCtx.DockerHost = pipelineTask.DockerHost - // 重置错误信息 + // Reset error message. p.Task.Error = "" - // 获取测试相关的namespace var linkedNamespace string var envName string if pipelineTask.Type == config.SingleType { @@ -266,13 +268,11 @@ func (p *TestPlugin) Run(ctx context.Context, pipelineTask *task.Task, pipelineC } } -// Wait ... func (p *TestPlugin) Wait(ctx context.Context) { status := waitJobEndWithFile(ctx, p.TaskTimeout(), p.KubeNamespace, p.JobName, true, p.kubeClient, p.Log) p.SetStatus(status) } -// Complete ... func (p *TestPlugin) Complete(ctx context.Context, pipelineTask *task.Task, serviceName string) { pipelineName := pipelineTask.PipelineName pipelineTaskID := pipelineTask.TaskID @@ -494,7 +494,6 @@ func (p *TestPlugin) IsTaskDone() bool { return false } -// IsTaskFailed ... 
func (p *TestPlugin) IsTaskFailed() bool { if p.Task.TaskStatus == config.StatusFailed || p.Task.TaskStatus == config.StatusTimeout || p.Task.TaskStatus == config.StatusCancelled { return true @@ -502,22 +501,18 @@ func (p *TestPlugin) IsTaskFailed() bool { return false } -// SetStartTime ... func (p *TestPlugin) SetStartTime() { p.Task.StartTime = time.Now().Unix() } -// SetEndTime ... func (p *TestPlugin) SetEndTime() { p.Task.EndTime = time.Now().Unix() } -// IsTaskEnabled .. func (p *TestPlugin) IsTaskEnabled() bool { return p.Task.Enabled } -// ResetError ... func (p *TestPlugin) ResetError() { p.Task.Error = "" } diff --git a/pkg/microservice/warpdrive/core/service/types/reaper.go b/pkg/microservice/warpdrive/core/service/types/reaper.go index 0e2a153831..bfc68dbe20 100644 --- a/pkg/microservice/warpdrive/core/service/types/reaper.go +++ b/pkg/microservice/warpdrive/core/service/types/reaper.go @@ -23,6 +23,7 @@ import ( "github.com/koderover/zadig/pkg/microservice/warpdrive/config" "github.com/koderover/zadig/pkg/microservice/warpdrive/core/service/types/task" "github.com/koderover/zadig/pkg/setting" + "github.com/koderover/zadig/pkg/types" ) // Context ... @@ -32,6 +33,7 @@ type Context struct { // Workspace 容器工作目录 [必填] Workspace string `yaml:"workspace"` + // TODO: Deprecated. // CleanWorkspace 是否清理工作目录 [选填, 默认为 false] CleanWorkspace bool `yaml:"clean_workspace"` @@ -115,6 +117,12 @@ type Context struct { ArtifactInfo *ArtifactInfo `yaml:"artifact_info"` ArtifactPath string `yaml:"artifact_path"` AesKey string `yaml:"aes_key"` + + // New since V1.10.0. 
+ CacheEnable bool `yaml:"cache_enable"` + Cache types.Cache `yaml:"cache"` + CacheDirType types.CacheDirType `yaml:"cache_dir_type"` + CacheUserDir string `yaml:"cache_user_dir"` } type ArtifactInfo struct { diff --git a/pkg/microservice/warpdrive/core/service/types/task/build.go b/pkg/microservice/warpdrive/core/service/types/task/build.go index c0e28f0189..386d8b8d6d 100644 --- a/pkg/microservice/warpdrive/core/service/types/task/build.go +++ b/pkg/microservice/warpdrive/core/service/types/task/build.go @@ -21,6 +21,7 @@ import ( "github.com/koderover/zadig/pkg/microservice/warpdrive/config" "github.com/koderover/zadig/pkg/setting" + "github.com/koderover/zadig/pkg/types" ) type Build struct { @@ -59,6 +60,12 @@ type Build struct { EnvHostInfo map[string][]string `bson:"env_host_info,omitempty" json:"env_host_info,omitempty"` ArtifactInfo *ArtifactInfo `bson:"artifact_info,omitempty" json:"artifact_info,omitempty"` ClusterID string `bson:"cluster_id,omitempty" json:"cluster_id,omitempty"` + + // New since V1.10.0. + Cache types.Cache `bson:"cache" json:"cache"` + CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` } type ArtifactInfo struct { @@ -148,9 +155,11 @@ type DockerBuildStatus struct { } type JobCtx struct { - EnableProxy bool `bson:"enable_proxy" json:"enable_proxy"` - Proxy *Proxy `bson:"proxy" json:"proxy"` - CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` + EnableProxy bool `bson:"enable_proxy" json:"enable_proxy"` + Proxy *Proxy `bson:"proxy" json:"proxy"` + + // TODO: Deprecated. 
+ CleanWorkspace bool `bson:"clean_workspace" json:"clean_workspace"` // BuildJobCtx Builds []*Repository `bson:"builds" json:"builds"` @@ -173,8 +182,10 @@ type JobCtx struct { FileArchiveCtx *FileArchiveCtx `bson:"file_archive_ctx,omitempty" json:"file_archive_ctx,omitempty"` // TestType TestType string `bson:"test_type" json:"test_type"` - // Caches + + // TODO: Deprecated. Caches []string `bson:"caches" json:"caches"` + // buildV3 ArtifactPath string `bson:"artifact_path,omitempty" json:"artifact_path,omitempty"` ArtifactPaths []string `bson:"artifact_paths,omitempty" json:"artifact_paths,omitempty"` diff --git a/pkg/microservice/warpdrive/core/service/types/task/testing.go b/pkg/microservice/warpdrive/core/service/types/task/testing.go index d443b7185d..fca2eb8647 100644 --- a/pkg/microservice/warpdrive/core/service/types/task/testing.go +++ b/pkg/microservice/warpdrive/core/service/types/task/testing.go @@ -21,6 +21,7 @@ import ( "github.com/koderover/zadig/pkg/microservice/warpdrive/config" "github.com/koderover/zadig/pkg/setting" + "github.com/koderover/zadig/pkg/types" ) type Testing struct { @@ -48,6 +49,12 @@ type Testing struct { Registries []*RegistryNamespace `bson:"-" json:"registries"` ClusterID string `bson:"cluster_id" json:"cluster_id"` Namespace string `bson:"namespace" json:"namespace"` + + // New since V1.10.0. 
+ Cache types.Cache `bson:"cache" json:"cache"` + CacheEnable bool `bson:"cache_enable" json:"cache_enable"` + CacheDirType types.CacheDirType `bson:"cache_dir_type" json:"cache_dir_type"` + CacheUserDir string `bson:"cache_user_dir" json:"cache_user_dir"` } func (t *Testing) ToSubTask() (map[string]interface{}, error) { diff --git a/pkg/microservice/warpdrive/core/service/types/task/warpdrive.go b/pkg/microservice/warpdrive/core/service/types/task/warpdrive.go index 15a7914577..20c8442076 100644 --- a/pkg/microservice/warpdrive/core/service/types/task/warpdrive.go +++ b/pkg/microservice/warpdrive/core/service/types/task/warpdrive.go @@ -16,6 +16,10 @@ limitations under the License. package task +import ( + "github.com/koderover/zadig/pkg/types" +) + type PipelineCtx struct { DockerHost string Workspace string @@ -23,6 +27,12 @@ type PipelineCtx struct { DockerMountDir string ConfigMapMountDir string MultiRun bool + + // New since V1.10.0. + Cache types.Cache + CacheEnable bool + CacheDirType types.CacheDirType + CacheUserDir string } type JobCtxBuilder struct { diff --git a/pkg/shared/handler/base.go b/pkg/shared/handler/base.go index 860fbaee0c..d2cd03399c 100644 --- a/pkg/shared/handler/base.go +++ b/pkg/shared/handler/base.go @@ -48,6 +48,7 @@ type jwtClaims struct { UID string `json:"uid"` } +// TODO: We need to implement a `context.Context` that conforms to the golang standard library. func NewContext(c *gin.Context) *Context { logger := ginzap.WithContext(c).Sugar() var claims jwtClaims diff --git a/pkg/types/cache.go b/pkg/types/cache.go new file mode 100644 index 0000000000..58ffcf3e49 --- /dev/null +++ b/pkg/types/cache.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+type MediumType string // MediumType selects the storage medium backing a build/test cache; see ObjectMedium and NFSMedium.
+
+const (
+	ObjectMedium MediumType = "object" // cache kept on an object-storage backend (configured via ObjectProperties)
+	NFSMedium    MediumType = "nfs"    // cache kept on an NFS volume (configured via NFSProperties)
+)
+
+type ProvisionType string // ProvisionType says how an NFS cache volume is obtained; see DynamicProvision and StaticProvision.
+
+const (
+	DynamicProvision ProvisionType = "dynamic" // volume provisioned on demand — presumably via NFSProperties.StorageClass; confirm at call sites
+	StaticProvision  ProvisionType = "static"  // pre-existing volume — presumably referenced by NFSProperties.PVC; confirm at call sites
+)
+
+type ObjectProperties struct { // ObjectProperties configures an object-storage cache medium.
+	ID string `json:"id" bson:"id"` // identifier of the object-storage backend — assumed to reference a storage configured elsewhere; TODO confirm
+}
+
+type NFSProperties struct { // NFSProperties configures an NFS cache medium.
+	ProvisionType    ProvisionType `json:"provision_type" bson:"provision_type"` // dynamic or static; see the ProvisionType constants above
+	StorageClass     string        `json:"storage_class" bson:"storage_class"` // StorageClass name — presumably consulted when ProvisionType is dynamic
+	StorageSizeInGiB int64         `json:"storage_size_in_gib" bson:"storage_size_in_gib"` // requested volume size in GiB (per the field and tag name)
+	PVC              string        `json:"pvc" bson:"pvc"` // PVC name — presumably consulted when ProvisionType is static
+}
+
+type Cache struct { // Cache picks a medium and carries the medium-specific settings; presumably only the properties matching MediumType apply — confirm.
+	MediumType       MediumType       `json:"medium_type" bson:"medium_type"`
+	ObjectProperties ObjectProperties `json:"object_properties" bson:"object_properties"` // NOTE(review): looks meaningful only when MediumType == ObjectMedium — confirm
+	NFSProperties    NFSProperties    `json:"nfs_properties" bson:"nfs_properties"` // NOTE(review): looks meaningful only when MediumType == NFSMedium — confirm
+}
+
+type CacheDirType string // CacheDirType says which directory is cached; see WorkspaceCacheDir and UserDefinedCacheDir.
+
+const (
+	WorkspaceCacheDir   CacheDirType = "workspace"    // cache the job workspace directory
+	UserDefinedCacheDir CacheDirType = "user_defined" // cache a user-chosen path (carried as `cache_user_dir` elsewhere in this change)
+)