diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..51923538 --- /dev/null +++ b/.gitignore @@ -0,0 +1,13 @@ +operator.tgz +cover.out +bin +testbin/* +onpremtest/* +ords/*zip +.gitattributes +.vscode +.gitlab-ci.yml +.DS_Store +# development +.idea +.local diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 722965b9..87d9b60b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,16 +1,16 @@ # Contributing to This Repository -We welcome your contributions! There are multiple ways to contribute. +Oracle welcomes your contributions! There are multiple ways that you can contribute. ## Opening issues -For bugs or enhancement requests, please file a GitHub issue unless the problem is security-related. When filing a bug, remember that the more specific the bug is, the more likely it is to be fixed. If you think you've found a security -vulnerability, then do not raise a GitHub issue. Instead, follow the instructions in our -[security policy](./SECURITY.md). +For bugs or enhancement requests, please file a GitHub issue, unless the problem is security-related. + +When filing a bug, remember that the more specific the bug is, the more likely it is to be fixed. If you think you've found a security vulnerability, then do not raise a GitHub issue. Instead, follow the instructions in our [security policy](./SECURITY.md). ## Contributing code -We welcome your code contributions. Before submitting code by using a pull request, +Oracle welcomes your code contributions. Before submitting code by using a pull request, you must sign the [Oracle Contributor Agreement][OCA] (OCA), and your commits must include the following line, using the name and e-mail address you used to sign the OCA: ```text @@ -29,22 +29,22 @@ can be accepted. ## Pull request process -1. Ensure there is an issue created to track and discuss the fix or enhancement that you intend to submit. +1. 
Ensure that there is an issue created to track and discuss the fix or enhancement that you intend to submit. 1. Fork this repository. 1. Create a branch in your fork to implement the changes. Oracle recommends using the issue number as part of your branch name. For example: `1234-fixes` 1. Ensure that any documentation is updated with the changes that are required by your change. -1. Ensure that any samples are updated, if the base image has been changed. +1. If the base image has been changed, then ensure that any examples are updated. 1. Submit the pull request. *Do not leave the pull request blank*. Explain exactly what your changes are meant to do, and provide simple steps to indicate how to validate your changes. Ensure that you reference the issue that you created as well. -1. Before the changes are merged, Oracle will assign the pull request to 2 or 3 people for review. +1. Before the changes are merged, Oracle will assign the pull request to two or three people for review. ## Code of conduct Follow the [Golden Rule](https://en.wikipedia.org/wiki/Golden_Rule). If you'd -like more specific guidelines, see the [Contributor Covenant Code of Conduct][COC]. +like more specific guidelines, then see the [Contributor Covenant Code of Conduct][COC]. [OCA]: https://oca.opensource.oracle.com [COC]: https://www.contributor-covenant.org/version/1/4/code-of-conduct/ diff --git a/Dockerfile b/Dockerfile index 0c181a48..f444d508 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,23 @@ -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # Build the manager binary -FROM golang:1.16 as builder +ARG BUILDER_IMG +FROM ${BUILDER_IMG} as builder + +ARG TARGETARCH +# Download golang if INSTALL_GO is set to true +ARG INSTALL_GO +ARG GOLANG_VERSION +RUN if [ "$INSTALL_GO" = "true" ]; then \ + echo -e "\nCurrent Arch: $(arch), Downloading Go for linux/${TARGETARCH}" &&\ + curl -LJO https://go.dev/dl/go${GOLANG_VERSION}.linux-${TARGETARCH}.tar.gz &&\ + rm -rf /usr/local/go && tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-${TARGETARCH}.tar.gz &&\ + rm go${GOLANG_VERSION}.linux-${TARGETARCH}.tar.gz; \ + echo "Go Arch: $(/usr/local/go/bin/go env GOARCH)"; \ + fi +ENV PATH=${GOLANG_VERSION:+"${PATH}:/usr/local/go/bin"} WORKDIR /workspace # Copy the Go Modules manifests @@ -22,12 +36,17 @@ COPY LICENSE.txt LICENSE.txt COPY THIRD_PARTY_LICENSES_DOCKER.txt THIRD_PARTY_LICENSES_DOCKER.txt # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o manager main.go -# Use oraclelinux:8-slim as base image to package the manager binary -FROM oraclelinux:8-slim +# Use oraclelinux:9 as base image to package the manager binary +FROM oraclelinux:9 +ARG CI_COMMIT_SHA +ARG CI_COMMIT_BRANCH +ENV COMMIT_SHA=${CI_COMMIT_SHA} \ + COMMIT_BRANCH=${CI_COMMIT_BRANCH} WORKDIR / COPY --from=builder /workspace/manager . +COPY ords/ords_init.sh . RUN useradd -u 1002 nonroot USER nonroot diff --git a/LICENSE.txt b/LICENSE.txt index 4ac08f59..56dbf679 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2021 Oracle and/or its affiliates. +Copyright (c) 2022 Oracle and/or its affiliates. The Universal Permissive License (UPL), Version 1.0 diff --git a/Makefile b/Makefile index a0bd6870..b9755e6f 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2025, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # @@ -18,12 +18,14 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) # Image URL to use all building/pushing image targets IMG ?= controller:latest -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -# API version has to be v1 to use defaulting (https://github.com/kubernetes-sigs/controller-tools/issues/478) -CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" +# Enable allowDangerousTypes to use float type in CRD +# Remove the Desc to avoid YAML getting too long. See the discussion: +# https://github.com/kubernetes-sigs/kubebuilder/issues/1140 +CRD_OPTIONS ?= "crd:maxDescLen=0,allowDangerousTypes=true" # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.21 - +ENVTEST_K8S_VERSION = 1.29.0 +# Operator YAML file +OPERATOR_YAML=$$(basename $$(pwd)).yaml # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -38,107 +40,145 @@ SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec all: build - ##@ Development manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases - + generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." - + fmt: ## Run go fmt against code. go fmt ./... - + vet: ## Run go vet against code. go vet ./... - -TEST ?= ./apis/... ./commons/... ./controllers/... + +TEST ?= ./apis/database/v1alpha1 ./commons/... ./controllers/... test: manifests generate fmt vet envtest ## Run unit tests. 
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test $(TEST) -coverprofile cover.out - + E2ETEST ?= ./test/e2e/ e2e: manifests generate fmt vet envtest ## Run e2e tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test $(E2ETEST) -v -timeout 40m -ginkgo.v -ginkgo.failFast - + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test $(E2ETEST) -test.timeout 0 -test.v --ginkgo.fail-fast + ##@ Build - + build: generate fmt vet ## Build manager binary. go build -o bin/manager main.go - + run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go - -docker-build: test ## Build docker image with the manager. - docker build --no-cache=true --build-arg http_proxy=${HTTP_PROXY} --build-arg https_proxy=${HTTPS_PROXY} . -t ${IMG} - -#docker-build-proxy: test -# docker build --build-arg http_proxy=${http_proxy} --build-arg https_proxy=${https_proxy} build . -t ${IMG} - + +GOLANG_VERSION ?= 1.23.3 +## Download golang in the Dockerfile if BUILD_INTERNAL is set to true. +## Otherwise, use golang image from docker hub as the builder. +ifeq ($(BUILD_INTERNAL), true) +BUILDER_IMG = oraclelinux:9 +BUILD_ARGS = --build-arg BUILDER_IMG=$(BUILDER_IMG) --build-arg GOLANG_VERSION=$(GOLANG_VERSION) --build-arg INSTALL_GO=true +else +BUILDER_IMG = golang:$(GOLANG_VERSION) +BUILD_ARGS = --build-arg BUILDER_IMG=$(BUILDER_IMG) --build-arg INSTALL_GO="false" --build-arg GOLANG_VERSION=$(GOLANG_VERSION) +endif +ifeq ($(BUILD_MANIFEST), true) +BUILD_ARGS := $(BUILD_ARGS) --platform=linux/arm64,linux/amd64 --jobs=2 --manifest +PUSH_ARGS := manifest +else +BUILD_ARGS := $(BUILD_ARGS) --platform=linux/amd64 --tag +endif +docker-build: #manifests generate fmt vet #test ## Build docker image with the manager. 
Disable the test but keep the validations to fail fast + docker build --no-cache=true --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy=$(HTTPS_PROXY) \ + --build-arg CI_COMMIT_SHA=$(CI_COMMIT_SHA) --build-arg CI_COMMIT_BRANCH=$(CI_COMMIT_BRANCH) \ + $(BUILD_ARGS) $(IMG) . + docker-push: ## Push docker image with the manager. - docker push ${IMG} + docker $(PUSH_ARGS) push $(IMG) -##@ Deployment +# Push to minikube's local registry enabled by registry add-on +minikube-push: + docker tag $(IMG) $$(minikube ip):5000/$(IMG) + docker push --tls-verify=false $$(minikube ip):5000/$(IMG) +##@ Deployment + install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl apply -f - - + uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl delete -f - - + deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 
- cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/default | kubectl apply -f - +minikube-deploy: minikube-operator-yaml minikube-push + kubectl apply -f $(OPERATOR_YAML) + +# Bug:34265574 +# Used sed to reposition the controller-manager Deployment after the certificate creation in the OPERATOR_YAML operator-yaml: manifests kustomize - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/default > $$(basename $$(pwd)).yaml + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/default > "$(OPERATOR_YAML)" + sed -i.bak -e '/^apiVersion: apps\/v1/,/---/d' "$(OPERATOR_YAML)" + (echo --- && sed '/^apiVersion: apps\/v1/,/---/!d' "$(OPERATOR_YAML).bak") >> "$(OPERATOR_YAML)" + rm "$(OPERATOR_YAML).bak" + +minikube-operator-yaml: IMG:=localhost:5000/$(IMG) +minikube-operator-yaml: operator-yaml + sed -i.bak 's/\(replicas.\) 3/\1 1/g' "$(OPERATOR_YAML)" + rm "$(OPERATOR_YAML).bak" undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/default | kubectl delete -f - - - -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen -controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.1) - -KUSTOMIZE = $(shell pwd)/bin/kustomize -kustomize: ## Download kustomize locally if necessary. - $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) - -ENVTEST = $(shell pwd)/bin/setup-envtest -envtest: ## Download envtest-setup locally if necessary. - $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) - -# go-get-tool will 'go get' any package $2 and install it to $1. 
-PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) -define go-get-tool -@[ -f $(1) ] || { \ -set -e ;\ -TMP_DIR=$$(mktemp -d) ;\ -cd $$TMP_DIR ;\ -go mod init tmp ;\ -echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ -rm -rf $$TMP_DIR ;\ -} -endef - + +##@ Build Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.3.0 +CONTROLLER_TOOLS_VERSION ?= v0.16.5 + +KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. +$(ENVTEST): $(LOCALBIN) + GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest + + .PHONY: bundle bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files. operator-sdk generate kustomize manifests -q cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) operator-sdk bundle validate ./bundle - + .PHONY: bundle-build bundle-build: ## Build the bundle image. docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 
- + .PHONY: bundle-push bundle-push: ## Push the bundle image. $(MAKE) docker-push IMG=$(BUNDLE_IMG) - + .PHONY: opm OPM = ./bin/opm opm: ## Download opm locally if necessary. @@ -150,31 +190,31 @@ ifeq (,$(shell which opm 2>/dev/null)) OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\ chmod +x $(OPM) ;\ - } + } else OPM = $(shell which opm) endif endif - + # A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). # These images MUST exist in a registry and be pull-able. BUNDLE_IMGS ?= $(BUNDLE_IMG) - + # The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) - + # Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. ifneq ($(origin CATALOG_BASE_IMG), undefined) FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) endif - + # Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. # This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator .PHONY: catalog-build catalog-build: opm ## Build a catalog image. $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) - + # Push the catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. 
diff --git a/PREREQUISITES.md b/PREREQUISITES.md index 0dd557ae..3c73ad4b 100644 --- a/PREREQUISITES.md +++ b/PREREQUISITES.md @@ -2,13 +2,14 @@ ## Prerequisites for Using Oracle Database Operator for Kubernetes -Oracle Database operator for Kubernetes (OraOperator) manages all Cloud deployments of Oracle Database, including: +Oracle Database Operator for Kubernetes (`OraOperator`) manages all Cloud deployments of Oracle Database, including: * Oracle Autonomous Database (ADB) * Containerized Oracle Database Single Instance (SIDB) -* Containerized Sharded Oracle Database (SHARDING) +* Containerized Oracle Globally Distributed Database (GDD) ### Setting Up a Kubernetes Cluster and Volumes +Review and complete each step as needed. #### Setting Up an OKE Cluster on Oracle Cloud Infrastructure (OCI) @@ -18,16 +19,16 @@ To set up a Kubernetes cluster on Oracle Cloud Infrastructure: 1. Create an OKE Cluster 1. Provision persistent storage for data files (NFS or Block) -Note: You must provision persistent storage if you intend to deploy containerized databases over the OKE cluster. +Note: If you intend to deploy containerized databases over the OKE cluster, then you must provision persistent storage. 
### Prerequites for Oracle Autonomous Database (ADB) -If you intent to use `OraOperator` to handle Oracle Autonomous Database lifecycles, then read [Oracle Autonomous Database prerequisites](./doc/adb/ADB_PREREQUISITES.md) +If you intend to use `OraOperator` to handle Oracle Autonomous Database lifecycles, then read [Oracle Autonomous Database prerequisites](./docs/adb/ADB_PREREQUISITES.md) ### Prerequites for Single Instance Databases (SIDB) -If you intent to use `OraOperator` to handle Oracle Database Single Instance lifecycles, then read [Single Instance Database Prerequisites](./doc/sidb/SIDB_PREREQUISITES.md) +If you intend to use `OraOperator` to handle Oracle Database Single Instance lifecycles, then read [Single Instance Database Prerequisites](./docs/sidb/PREREQUISITES.md) -### Prerequites for Sharded Databases (SHARDING) +### Prerequites for Oracle Globally Distributed Databases (GDD) - If you intent to use OraOperator to handle the lifecycle of Oracle Database deployed with Oracle Sharding, then read [Sharded Database Prerequisites](./doc/sharding/README.md#prerequsites-for-running-oracle-sharding-database-controller) + If you intend to use OraOperator to handle the lifecycle of Oracle Globally Distributed Database (GDD), then read [Oracle Globally Distributed Database Prerequisites](./docs/sharding/README.md#prerequsites-for-running-oracle-sharding-database-controller) diff --git a/PROJECT b/PROJECT index 70e34778..97e9409c 100644 --- a/PROJECT +++ b/PROJECT @@ -1,9 +1,14 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work.
+# More info: https://book.kubebuilder.io/reference/project-config.html domain: oracle.com layout: - go.kubebuilder.io/v2 multigroup: true plugins: - go.sdk.operatorframework.io/v2-alpha: {} + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} projectName: oracle-database-operator repo: github.com/oracle/oracle-database-operator resources: @@ -16,6 +21,34 @@ resources: kind: AutonomousDatabase path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 version: v1alpha1 + webhooks: + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: AutonomousDatabaseBackup + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: AutonomousDatabaseRestore + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 + webhooks: + validation: true + webhookVersion: v1beta1 - api: crdVersion: v1 namespaced: true @@ -38,4 +71,192 @@ resources: kind: ShardingDatabase path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1beta1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: PDB + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: CDB + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 +- api: + 
crdVersion: v1beta1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: OracleRestDataService + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1beta1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: AutonomousContainerDatabase + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 + webhooks: + validation: true + webhookVersion: v1 +- api: + crdVersion: v1beta1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: DbcsSystem + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + controller: true + domain: oracle.com + group: database + kind: DataguardBroker + path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + controller: true + domain: oracle.com + group: observability + kind: DatabaseObserver + path: github.com/oracle/oracle-database-operator/apis/observability/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: ShardingDatabase + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: DbcsSystem + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: LREST + path: 
github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: LRPDB + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: OrdsSrvs + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: observability + kind: DatabaseObserver + path: github.com/oracle/oracle-database-operator/apis/observability/v1 + version: v1 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: observability + kind: DatabaseObserver + path: github.com/oracle/oracle-database-operator/apis/observability/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: SingleInstanceDatabase + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: DataguardBroker + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: OracleRestDataService + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 version: "3" diff --git a/README.md b/README.md index 76d97495..936ae23a 100644 --- a/README.md +++ b/README.md @@ -1,39 +1,81 @@ # Oracle Database Operator for Kubernetes -## Make Oracle Database Kubernetes-Native - -As part of Oracle's resolution to make Oracle 
Database Kubernetes-native (that is, observable and operable by Kubernetes), Oracle is announcing _Oracle Database Operator for Kubernetes_ (`OraOperator`). - -Since Oracle Database 19c, Oracle Database images have been supported in containers (Docker, Podman) for production use and Kubernetes deployment with Helm Charts. This release includes Oracle Database Operator, which is a new open source product that extends the Kubernetes API with custom resources and controllers for automating Oracle Database lifecycle management. - -In this release, `OraOperator` supports the following Oracle Database configurations: - -* Oracle Autonomous Database on shared Oracle Cloud Infrastructure (OCI), also known as ADB-S -* Containerized Single Instance databases (SIDB) deployed in the Oracle Kubernetes Engine (OKE) -* Containerized Sharded databases (SHARDED) deployed in OKE - -Oracle will continue to expand Oracle Database Operator support for additional Oracle Database configurations. - -## Features Summary +## Make Oracle Database Kubernetes Native + +As part of Oracle's resolution to make Oracle Database Kubernetes native (that is, observable and operable by Kubernetes), Oracle released the _Oracle Database Operator for Kubernetes_ (`OraOperator` or the operator). OraOperator extends the Kubernetes API with custom resources and controllers for automating the management of the Oracle Database lifecycle. + +## Supported Database Configurations in V1.2.0 +In this v1.2.0 production release, `OraOperator` supports the following database configurations, and controllers: + +* Oracle Autonomous Database: + * Oracle Autonomous Database shared Oracle Cloud Infrastructure (OCI) (ADB-S) + * Oracle Autonomous Database on dedicated Cloud infrastructure (ADB-D) + * Oracle Autonomous Container Database (ACD), the infrastructure for provisioning Autonomous Databases. 
+* Containerized Single Instance databases (SIDB) deployed in the Oracle Kubernetes Engine (OKE) and any k8s where OraOperator is deployed +* Containerized Oracle Globally Distributed Databases(GDD) deployed in OKE and any k8s where OraOperator is deployed +* Oracle Multitenant Databases (CDB/PDBs) +* Oracle Base Database Service (OBDS) on Oracle Cloud Infrastructure (OCI) +* Oracle Data Guard +* Oracle Database Observability +* Oracle Database Rest Service (ORDS) instances + +## New Lifecycle Features in V1.2.0 Release (Controllers Enhancements) +* ORDSSERVICES + - Install on SIDB and ADB + - Provision and Delete ORDS instances +* SIDB + - Oracle Database 23ai Free support + - Oracle Database 23ai Free-lite support + - SIDB resource management + - True Cache support for Free SIDB databases (Preview) + - Observer for FastStartFailover with Data Guard + - Snapshot Standby support in Data Guard setup +* Globally Distributed Database : Support for Oracle Database 23ai Raft replication +* Autonomous Database: support for Database cloning +* Multitenant DB: + - ORDS-based Controller: assertive deletion policy. + - New LRES based Controller (ARM & AM) + - PDBs settings with init parameters config map + - Assertive deletion policy. +* Database Observability (preview) + - Support for Database Logs (in addition to Metrics) + - Support for the latest Exporter container images + - Bug Fix: Prometheus label config +* Oracle Base Database Service: support for Oracle Database 23ai Cloning, using KMS Vaults, PDB creation. 
+ +## New Product Features +*The Operator itself, as a product, brings the following new features: +* Published on `operatorhub.io` +* Operator Lifecycle Manager (OLM) support (install from `operatorhub.io`) +* Validated on Google Kubernetes Engine + +## Overall Features Summary This release of Oracle Database Operator for Kubernetes (the operator) supports the following lifecycle operations: -* ADB-S: provision, bind, start, stop, terminate (soft/hard), scale (down/up) -* SIDB: provision, clone, patch (in-place/out-of-place), update database initialization parameters, update database configuration (Flashback, archiving), Oracle Enterprise Manager (EM) Express (a basic observability console) -* SHARDED: provision/deploy sharded databases and the shard topology, add a new shard, delete an existing shard +* ADB-S/ADB-D: Provision, bind, start, stop, terminate (soft/hard), scale (up/down), long-term backup, manual restore, cloning. +* ACD: Provision, bind, restart, terminate (soft/hard) +* SIDB: Provision, clone, patch (in-place/out-of-place), update database initialization parameters, update database configuration (Flashback, archiving), Oracle Enterprise Manager (EM) Express (basic console), Oracle REST Data Service (ORDS) to support REST based SQL, PDB management, SQL Developer Web, Application Express (Apex), Resource management, True Cache, Observer for FastStartFailover (Data Guard), and Snapshot Standby (Data Guard) +* ORDS Services: Provision and delete ORDS instances +* Globally Distrib. (Sharded): Provision/deploy sharded databases and the shard topology, Add a new shard, Delete an existing shard, Raft replication. 
+* Oracle Multitenant Database (choice of controller): Bind to a CDB, Create a  PDB, Plug a  PDB, Unplug a PDB, Delete a PDB, Clone a PDB, Open/Close a PDB, Assertive deletion policy +* Oracle Base Database Service (OBDS): Provision, bind, scale shape Up/Down, Scale Storage Up, Terminate and Update License, Cloning, PDB creation, using KMS Vaults on Oracle Cloud Infrastructure (OCI) +* Oracle Data Guard: Provision a Standby for the SIDB resource, Create a Data Guard Configuration, Perform a Switchover, Patch Primary and Standby databases in Data Guard Configuration +* Oracle Database Observability: create, patch, delete `databaseObserver` resources (Logs and Metrics) +* Watch over a set of namespaces or all the namespaces in the cluster using the `WATCH_NAMESPACE` environment variable of the operator deployment -Upcoming releases will support new configurations, operations and capabilities. ## Release Status -**CAUTION:** The current release of `OraOperator` (v0.1.0) is for development and test only. DO NOT USE IN PRODUCTION. +This production release has been installed and tested on the following Kubernetes platforms: -This release can be deployed on the following platforms: - -* [Oracle Container Engine for Kubernetes (OKE)](https://www.oracle.com/cloud-native/container-engine-kubernetes/) with Kubernetes 1.17 or later -* In an on-premises [Oracle Linux Cloud Native Environment(OLCNE)](https://docs.oracle.com/en/operating-systems/olcne/) 1.3 or later - -In upcoming releases, the operator will be certified against third-party Kubernetes clusters. 
+* [Oracle Container Engine for Kubernetes (OKE)](https://www.oracle.com/cloud-native/container-engine-kubernetes/) with Kubernetes 1.24 +* [Oracle Linux Cloud Native Environment(OLCNE)](https://docs.oracle.com/en/operating-systems/olcne/) 1.6 +* [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) +* [Amazon Elastic Kubernetes Service](https://aws.amazon.com/eks/) +* [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs) +* [Red Hat OKD](https://www.okd.io/) +* [Minikube](https://minikube.sigs.k8s.io/docs/) with version v1.29.0 ## Prerequisites @@ -41,29 +83,89 @@ Oracle strongly recommends that you ensure your system meets the following [Prer * ### Install cert-manager - The operator uses webhooks for validating user input before persisting it in Etcd. Webhooks require TLS certificates that are generated and managed by a certificate manager. + The operator uses webhooks for validating user input before persisting it in `etcd`. Webhooks require TLS certificates that are generated and managed by a certificate manager. Install the certificate manager with the following command: ```sh - kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml ``` -## Quick Install of the Operator +* ### Create Role Bindings for Access Management + + OraOperator supports the following two modes of deployment: + ##### 1. Cluster Scoped Deployment + + This is the default mode, in which OraOperator is deployed to operate in a cluster, and to monitor all the namespaces in the cluster. 
+ + - Grant the `serviceaccount:oracle-database-operator-system:default` clusterwide access for the resources by applying [cluster-role-binding.yaml](./rbac/cluster-role-binding.yaml) - To install the operator in the cluster quickly, you can use a single [oracle-database-operator.yaml](https://github.com/oracle/oracle-database-operator/blob/main/oracle-database-operator.yaml) file. Operator pod replicas are set to a default of 3 for High Availability, which can be scaled up and down. + ```sh + kubectl apply -f rbac/cluster-role-binding.yaml + ``` - Run the following command + - Next, apply the [oracle-database-operator.yaml](./oracle-database-operator.yaml) to deploy the Operator + + ```sh + kubectl apply -f oracle-database-operator.yaml + ``` + + ##### 2. Namespace Scoped Deployment + + In this mode, `OraOperator` can be deployed to operate in a namespace, and to monitor one or many namespaces. + + - Grant `serviceaccount:oracle-database-operator-system:default` service account with resource access in the required namespaces. For example, to monitor only the default namespace, apply the [`default-ns-role-binding.yaml`](./rbac/default-ns-role-binding.yaml) + + ```sh + kubectl apply -f rbac/default-ns-role-binding.yaml + ``` + To watch additional namespaces, create different role binding files for each namespace, using [default-ns-role-binding.yaml](./rbac/default-ns-role-binding.yaml) as a template, and changing the `metadata.name` and `metadata.namespace` fields + + - Next, edit the [`oracle-database-operator.yaml`](./oracle-database-operator.yaml) to add the required namespaces under `WATCH_NAMESPACE`. Use comma-delimited values for multiple namespaces. 
+ + ```sh + - name: WATCH_NAMESPACE + value: "default" + ``` + - Finally, apply the edited [`oracle-database-operator.yaml`](./oracle-database-operator.yaml) to deploy the Operator + + ```sh + kubectl apply -f oracle-database-operator.yaml + ``` + +* ### ClusterRole and ClusterRoleBinding for NodePort services + + To expose services on each node's IP and port (the NodePort), apply the [`node-rbac.yaml`](./rbac/node-rbac.yaml). Note that this step is not required for LoadBalancer services. ```sh - kubectl apply -f oracle-database-operator.yaml + kubectl apply -f rbac/node-rbac.yaml ``` +## Installation +### Install Oracle DB Operator + + After you have completed the preceding prerequisite changes, you can install the operator. To install the operator in the cluster quickly, you can apply the modified `oracle-database-operator.yaml` file from the preceding step. - Ensure that operator pods are up and running + Run the following command + + ```sh + kubectl apply -f oracle-database-operator.yaml + ``` + +## Install Oracle DB Operator + + After you have completed the preceding prerequisite changes, you can install the operator. To install the operator in the cluster quickly, you can apply the modified `oracle-database-operator.yaml` file from the preceding step. + + Run the following command + + ```sh + kubectl apply -f oracle-database-operator.yaml + ``` + + Ensure that the operator pods are up and running. For high availability, operator pod replicas are set to a default of 3. You can scale this setting up or down. ```sh $ kubectl get pods -n oracle-database-operator-system - + NAME READY STATUS RESTARTS AGE pod/oracle-database-operator-controller-manager-78666fdddb-s4xcm 1/1 Running 0 11d pod/oracle-database-operator-controller-manager-78666fdddb-5k6n4 1/1 Running 0 11d @@ -75,83 +177,100 @@ Oracle strongly recommends that you ensure your system meets the following [Prer You should see that the operator is up and running, along with the shipped controllers. 
-For more details, see [Oracle Database Operator Installation Instrunctions](./doc/installation/OPERATOR_INSTALLATION_README.md). - +For more details, see [Oracle Database Operator Installation Instructions](./docs/installation/OPERATOR_INSTALLATION_README.md). +## Documentation ## Getting Started with the Operator (Quickstart) -The quickstarts are designed for specific database configurations, including: +The following quickstarts are designed for specific database configurations: + +* [Oracle Autonomous Database](./docs/adb/README.md) +* [Oracle Autonomous Container Database](./docs/adb/ACD.md) +* [Containerized Oracle Single Instance Database and Data Guard](./docs/sidb/README.md) +* [Containerized Oracle Globally Distributed Database](./docs/sharding/README.md) +* [Oracle Multitenant Database](./docs/multitenant/README.md) +* [Oracle Base Database Service (OBDS)](./docs/dbcs/README.md) +* [ORDS Services (ORDSSRVS)](./docs/ordsservices/README.md) + -* [Oracle Autonomous Database](./doc/adb/README.md) -* [Oracle Database Single Instance configuration](./doc/sidb/README.md) -* [Oracle Database configured with Oracle Sharding](./doc/sharding/README.md) +The following quickstart is designed for non-database configurations: +* [Oracle Database Observability](./docs/observability/README.md) -YAML file templates are available under [`/config/samples`](./config/samples/). You can copy and edit these template files to configure them for your use cases. +## Examples +YAML file templates are available under [`/config/samples`](./config/samples/). You can copy and edit these template files to configure them for your use cases. ## Uninstall the Operator - To uninstall the operator, the final step consists of deciding whether or not you want to delete the CRDs and APIServices that were introduced to the cluster by the operator. 
Choose one of the following options: + To uninstall the operator, the final step consists of deciding whether you want to delete the custom resource definitions (CRDs) and Kubernetes `APIServices` introduced into the cluster by the operator. Choose one of the following options: -* ### Deleting the CRDs and APIServices +* ### Delete the CRDs and APIServices To delete all the CRD instances deployed to cluster by the operator, run the following commands, where `<namespace>` is the namespace of the cluster object: ```sh + kubectl delete oraclerestdataservice.database.oracle.com --all -n <namespace> kubectl delete singleinstancedatabase.database.oracle.com --all -n <namespace> kubectl delete shardingdatabase.database.oracle.com --all -n <namespace> + kubectl delete dbcssystem.database.oracle.com --all -n <namespace> kubectl delete autonomousdatabase.database.oracle.com --all -n <namespace> + kubectl delete autonomousdatabasebackup.database.oracle.com --all -n <namespace> + kubectl delete autonomousdatabaserestore.database.oracle.com --all -n <namespace> + kubectl delete autonomouscontainerdatabase.database.oracle.com --all -n <namespace> + kubectl delete cdb.database.oracle.com --all -n <namespace> + kubectl delete pdb.database.oracle.com --all -n <namespace> + kubectl delete dataguardbrokers.database.oracle.com --all -n <namespace> + kubectl delete databaseobserver.observability.oracle.com --all -n <namespace> ``` - After all CRD instances are deleted, it is safe to remove the CRDs, APISerivces and operator deployment. +* ### Delete the RBACs ```sh - kubectl delete -f oracle-database-operator.yaml --ignore-not-found=true + cat rbac/* | kubectl delete -f - ``` - Note: If the CRD instances are not deleted, and the operator is deleted by using the preceding command, then operator deployment and instance objects (pods,services,PVCs, and so on) are deleted. However, the CRD deletion stops responding, because the CRD instances have finalizers that can only be removed by the operator pod, which is deleted when the APIServices are deleted. 
- -* ### Retaining the CRDs and APIservices +* ### Delete the Deployment - To delete the operator deployment and retain the CRDs, run the following commands: + After all CRD instances are deleted, it is safe to remove the CRDs, APIServices and operator deployment. To remove these files, use the following command: ```sh - kubectl delete deployment.apps/oracle-database-operator-controller-manager -n oracle-database-operator-system + kubectl delete -f oracle-database-operator.yaml --ignore-not-found=true ``` -## Documentation + Note: If the CRD instances are not deleted, and the operator is deleted by using the preceding command, then operator deployment and instance objects (pods, services, PVCs, and so on) are deleted. However, if that happens, then the CRD deletion stops responding. This is because the CRD instances have properties that prevent their deletion, and that can only be removed by the operator pod, which is deleted when the APIServices are deleted. + +## Documentation for the supported Oracle Database configurations * [Oracle Autonomous Database](https://docs.oracle.com/en-us/iaas/Content/Database/Concepts/adboverview.htm) +* [Components of Dedicated Autonomous Database](https://docs.oracle.com/en-us/iaas/autonomous-database/doc/components.html) * [Oracle Database Single Instance](https://docs.oracle.com/en/database/oracle/oracle-database/) -* [Oracle Database Sharding](https://docs.oracle.com/en/database/oracle/oracle-database/21/shard/index.html) +* [Oracle Globally Distributed Database](https://docs.oracle.com/en/database/oracle/oracle-database/21/shard/index.html) +* [Oracle Database Cloud Service](https://docs.oracle.com/en/database/database-cloud-services.html) ## Contributing -See [Contributing to this Repository](./CONTRIBUTING.md) +This project welcomes contributions from the community. 
Before submitting a pull request, please [review our contribution guide](./CONTRIBUTING.md) -## Support +## Help -You can submit a GitHub issue, or you can also file an [Oracle Support service](https://support.oracle.com/portal/) request, using the product id: 14430. +You can submit a GitHub issue, or submit an issue and then file an [Oracle Support service](https://support.oracle.com/portal/) request. To file an issue or a service request, use the following product ID: 14430. ## Security -Secure platforms are an important basis for general system security. Ensure that your deployment is in compliance with common security practices. +Please consult the [security guide](./SECURITY.md) for our responsible security vulnerability disclosure process ### Managing Sensitive Data + Kubernetes secrets are the usual means for storing credentials or passwords input for access. The operator reads the Secrets programmatically, which limits exposure of sensitive data. However, to protect your sensitive data, Oracle strongly recommends that you set and get sensitive data from Oracle Cloud Infrastructure Vault, or from third-party Vaults. The following is an example of a YAML file fragment for specifying Oracle Cloud Infrastructure Vault as the repository for the admin password. - ``` + +```yaml adminPassword: ociSecretOCID: ocid1.vaultsecret.oc1... ``` -Examples in this repository where passwords are entered on the command line are for demonstration purposes only. - -### Reporting a Security Issue - -See [Reporting security vulnerabilities](./SECURITY.md) - +Examples in this repository where passwords are entered on the command line are for demonstration purposes only. ## License -Copyright (c) 2021 Oracle and/or its affiliates. +Copyright (c) 2022, 2025 Oracle and/or its affiliates. 
Released under the Universal Permissive License v1.0 as shown at [https://oss.oracle.com/licenses/upl/](https://oss.oracle.com/licenses/upl/) diff --git a/SECURITY.md b/SECURITY.md index 5acefa28..2ca81027 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,32 +1,36 @@ -# Reporting Security Vulnerabilities +# Reporting security vulnerabilities -Oracle values the independent security research community, and believes that -responsible disclosure of security vulnerabilities helps us to ensure the security -and privacy of all of our users. +Oracle values the independent security research community and believes that +responsible disclosure of security vulnerabilities helps us ensure the security +and privacy of all our users. Please do NOT raise a GitHub Issue to report a security vulnerability. If you -believe you have found a security vulnerability, then please submit a report to +believe you have found a security vulnerability, please submit a report to [secalert_us@oracle.com][1] preferably with a proof of concept. Please review some additional information on [how to report security vulnerabilities to Oracle][2]. -Oracle encourages anyone who contacts Oracle Security to use email encryption, using +We encourage people who contact Oracle Security to use email encryption using [our encryption key][3]. -Please do not use other channels, or contact the project maintainers +We ask that you do not use other channels or contact the project maintainers directly. -For non-vulnerability related security issues, including ideas for new or improved -security features, you are welcome to post these as GitHub Issues. +Non-vulnerability related security issues including ideas for new or improved +security features are welcome on GitHub Issues. -## Security Updates, Alerts and Bulletins +## Security updates, alerts and bulletins -Oracle issues security updates on a regular cadence. 
Many of our projects typically include release security fixes in conjunction with the [Oracle Critical Patch Update][3] program. Security updates are released on the -Tuesday closest to the 17th day of January, April, July and October. A pre-release announcement will be published on the Thursday preceding each release. Additional information, including past advisories, is available on our [security alerts][4] +Security updates will be released on a regular cadence. Many of our projects +will typically release security fixes in conjunction with the +Oracle Critical Patch Update program. Additional +information, including past advisories, is available on our [security alerts][4] page. -## Security-Related Information +## Security-related information -Oracle will provide security-related information in our documentation. The information can be a threat model, best practices for secure use, or any known security issues. Please note -that labs and example code are intended to demonstrate a concept. These examples should not be used for production use without ensuring that the code is hardened, and in compliance with common security practices. +We will provide security related information such as a threat model, considerations +for secure use, or any known security issues in our documentation. Please note +that labs and sample code are intended to demonstrate a concept and may not be +sufficiently hardened for production use. 
[1]: mailto:secalert_us@oracle.com [2]: https://www.oracle.com/corporate/security-practices/assurance/vulnerability/reporting.html diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt index 792cde69..14e4308f 100644 --- a/THIRD_PARTY_LICENSES.txt +++ b/THIRD_PARTY_LICENSES.txt @@ -1,7 +1,809 @@ -Operator SDK +------------------------------------- +Operator SDK 1.37.0 https://github.com/operator-framework/operator-sdk +Apache 2.0 + +------------------------------------- + +Apache License: + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + ------------------------------ + GO lang 1.23.3 + https://github.com/golang + + + Copyright (c) 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------- +apimachinery 0.31.3 +https://github.com/kubernetes/apimachinery/tr +Apache 2.0 + +------------------------- +controller-runtime 0.19.3 +https://github.com/kubernetes-sigs/controller-runtime/releases/tag/v0.16.3 +Apache 2.0 + +------------------------- +golang 1.23.3 +https://github.com/golang/go/releases/tag/go1.21.4 + +BSD 2-clause or 3-clause License: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +------------------------------- Copyright notices -------------------------- +-- various source files +Copyright 2021 The Go Authors. All rights reserved. +-- various go source files under src/cmd/compile/internal/ and src/cmd/link/internal/ +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.c\om) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to dea\l +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM\, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +---------------------------- Fourth-party information ---------------------- +== NAME OF DEPENDENCY 1 +github.com/google/pprof +== License Type +Apache 2.0 +== Copyright Notices +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 2 +github.com/chzyer/readline +== License Type +MIT +== Copyright Notices +The MIT License (MIT) + +Copyright (c) 2015 Chzyer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 3 +github.com/chzyer/test +== License Type +MIT +== Copyright Notices +The MIT License (MIT) + +Copyright (c) 2016 chzyer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 4 +github.com/chzyer/logex +== License Type +MIT +== Copyright Notices +The MIT License (MIT) + +Copyright (c) 2015 Chzyer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 5 +golang.org/x/sys +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 6 +github.com/ianlancetaylor/demangle +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2015 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 7 +golang.org/x/arch +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2015 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 8 +golang.org/x/mod +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 9 +golang.org/x/sync +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 10 +golang.org/x/sys +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 11 +golang.org/x/term +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 12 +golang.org/x/tools +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 13 +golang.org/x/crypto +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 14 +github.com/mmcloughlin/avo +== License Type +BSD 3-clause +== Copyright Notices +BSD 3-Clause License + +Copyright (c) 2018, Michael McLoughlin +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 15 +golang.org/x/net +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== NAME OF DEPENDENCY 16 +golang.org/x/text +== License Type +BSD 3-clause +== Copyright Notices +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--------------------------------(separator)--------------------------------- +== LICENSES +. +== Text of license (Apache 2.0) -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -202,54 +1004,25 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - ------------------------------ - GO lang - https://github.com/golang - - Copyright (c) 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------- -Logr +Logr 1.4.2 https://pkg.go.dev/github.com/go-logr/logr +https://github.com/go-logr/logr/tree/v1.3.0 Apache 2.0 License ------------------------- -OCI Go SDK -github.com/oracle/oci-go-sdk/v43 +OCI Go SDK 65.77.1 +https://github.com/oracle/oci-go-sdk/releases/tag/v65.53.0 Dual-License: UPL + Apache 2.0 -Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. -This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 +UPL license: + +Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. +This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl -or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. +or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. Copyright (c) 2019, 2020 Oracle and/or its affiliates. @@ -287,27 +1060,69 @@ The Universal Permissive License (UPL), Version 1.0 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +------------------------- +ginkgo 2.202. 
+https://github.com/onsi/ginkgo/releases/tag/v2.13.1 +MIT +------------------------------------ +Gomega +github.com/onsi/gomega +MIT License +Copyright (c) 2013-2014 Onsi Fakhouri + +---------------------------- +gomega 1.34.2 +http://onsi.github.io/gomega/ +MIT + ------------------------- -Kubernetes api +Kubernetes api 0.31.3 https://pkg.go.dev/k8s.io/api Apache 2.0 ---------------------------------- -Kubernetes apimachinery +Kubernetes apimachinery 0.31.3 https://pkg.go.dev/k8s.io/apimachinery Apache 2.0 ----------------------------------- -Kubernetes client-go +Kubernetes client-go 0.31.3 https://pkg.go.dev/k8s.io/client-go Apache 2.0 ------------------------------------- -Kubernetes controller-runtime project +Kubernetes controller-runtime project 0.19.3 https://pkg.go.dev/sigs.k8s.io/controller-runtime Apache 2.0 ------------------------------------ +kubernetes-sigs/yaml 1.4.0 +https://github.com/kubernetes-sigs/yaml/tree/v1.3.0 +MIT + +------------------------- +OCI SDK for Go 65.77.1 +https://github.com/oracle/oci-go-sdk +Multiple Licenses: Apache 2.0, UPL + +------------------------------ +Operator Lifecycle Manager (OLM) 0.30.0 +github.com/operator-framework/operator-lifecycle-manager +Apache 2.0 + + +------------------------------------ +Prometheus Operator 0.78.2 +https://github.com/prometheus-operator/prometheus-operator +Apache 2.0 + +------------------------------------ +yaml 3.0.1 +https://github.com/go-yaml/yaml/releases/tag/v3.0.1 +Dual license: Apache 2.0, MIT + YAML support for the Go language https://pkg.go.dev/gopkg.in/yaml.v2 Apache 2.0 @@ -315,8 +1130,14 @@ Apache 2.0 ------------------------------------ YAML marshaling and unmarshaling support for Go https://pkg.go.dev/sigs.k8s.io/yaml -DUal license: BSD-3-Clause, MIT +Dual license: BSD-3-Clause, MIT + +------------------------------------ +zap 1.27.0 +https://github.com/uber-go/zap/releases/tag/v1.27.0 +MIT +------------------------------------ The MIT License (MIT) Copyright (c) 2014 Sam 
Ghods @@ -340,7 +1161,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -Copyright (c) 2012 The Go Authors. +Copyright (c) 2012 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -369,9 +1190,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------ -Ginkgo +Ginkgo 2.20.2 github.com/onsi/ginkgo -MIT License +MIT License Copyright (c) 2013-2014 Onsi Fakhouri Permission is hereby granted, free of charge, to any person obtaining @@ -393,13 +1214,3 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------- -Gomega -github.com/onsi/gomega -MIT License -Copyright (c) 2013-2014 Onsi Fakhouri - ------------------------------------- -Operator Lifecycle Manager (OLM) -github.com/operator-framework/operator-lifecycle-manager -Apache 2.0 - diff --git a/apis/database/v1alpha1/adbfamily_common_spec.go b/apis/database/v1alpha1/adbfamily_common_spec.go new file mode 100644 index 00000000..74eb9f94 --- /dev/null +++ b/apis/database/v1alpha1/adbfamily_common_spec.go @@ -0,0 +1,67 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +// LastSuccessfulSpec is an annotation key which maps to the value of last successful spec +const LastSuccessfulSpec string = "lastSuccessfulSpec" + +/************************ +* OCI config +************************/ +type OciConfigSpec struct { + ConfigMapName *string `json:"configMapName,omitempty"` + SecretName *string `json:"secretName,omitempty"` +} + +/************************ +* ADB spec +************************/ +type K8sAdbSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciAdbSpec struct { + Ocid *string `json:"ocid,omitempty"` +} + +// TargetSpec defines the spec of the target for backup/restore runs. +type TargetSpec struct { + K8sAdb K8sAdbSpec `json:"k8sADB,omitempty"` + OciAdb OciAdbSpec `json:"ociADB,omitempty"` +} diff --git a/apis/database/v1alpha1/autonomouscontainerdatabase_types.go b/apis/database/v1alpha1/autonomouscontainerdatabase_types.go new file mode 100644 index 00000000..fd71b210 --- /dev/null +++ b/apis/database/v1alpha1/autonomouscontainerdatabase_types.go @@ -0,0 +1,224 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + "encoding/json" + "reflect" + + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// name of our custom finalizer +const ACDFinalizer = "database.oracle.com/acd-finalizer" + +type AcdActionEnum string + +const ( + AcdActionBlank AcdActionEnum = "" + AcdActionRestart AcdActionEnum = "RESTART" + AcdActionTerminate AcdActionEnum = "TERMINATE" +) + +func GetAcdActionEnumFromString(val string) (AcdActionEnum, bool) { + var mappingAcdActionEnum = map[string]AcdActionEnum{ + "RESTART": AcdActionRestart, + "TERMINATE": AcdActionTerminate, + "": AcdActionBlank, + } + + enum, ok := mappingAcdActionEnum[val] + return enum, ok +} + +// AutonomousContainerDatabaseSpec defines the desired state of AutonomousContainerDatabase +type AutonomousContainerDatabaseSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + AutonomousContainerDatabaseOCID *string `json:"autonomousContainerDatabaseOCID,omitempty"` + CompartmentOCID *string `json:"compartmentOCID,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + AutonomousExadataVMClusterOCID *string `json:"autonomousExadataVMClusterOCID,omitempty"` + // +kubebuilder:validation:Enum:="RELEASE_UPDATES";"RELEASE_UPDATE_REVISIONS" + PatchModel database.AutonomousContainerDatabasePatchModelEnum `json:"patchModel,omitempty"` + // +kubebuilder:validation:Enum:="SYNC";"RESTART";"TERMINATE" + Action AcdActionEnum `json:"action,omitempty"` + FreeformTags map[string]string `json:"freeformTags,omitempty"` + + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` + // +kubebuilder:default:=false + HardLink *bool 
`json:"hardLink,omitempty"`
+}
+
+// AutonomousContainerDatabaseStatus defines the observed state of AutonomousContainerDatabase
+type AutonomousContainerDatabaseStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+	LifecycleState database.AutonomousContainerDatabaseLifecycleStateEnum `json:"lifecycleState"`
+	TimeCreated    string                                                 `json:"timeCreated,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+// +kubebuilder:resource:shortName="acd";"acds"
+// +kubebuilder:printcolumn:JSONPath=".spec.displayName",name="DisplayName",type=string
+// +kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string
+// +kubebuilder:printcolumn:JSONPath=".status.timeCreated",name="Created",type=string
+
+// AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases API
+type AutonomousContainerDatabase struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   AutonomousContainerDatabaseSpec   `json:"spec,omitempty"`
+	Status AutonomousContainerDatabaseStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// AutonomousContainerDatabaseList contains a list of AutonomousContainerDatabase
+type AutonomousContainerDatabaseList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AutonomousContainerDatabase `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AutonomousContainerDatabase{}, &AutonomousContainerDatabaseList{})
+}
+
+// GetLastSuccessfulSpec returns spec from the last successful reconciliation.
+// Returns nil, nil if there is no lastSuccessfulSpec.
+func (acd *AutonomousContainerDatabase) GetLastSuccessfulSpec() (*AutonomousContainerDatabaseSpec, error) {
+	val, ok := acd.GetAnnotations()[LastSuccessfulSpec]
+	if !ok {
+		return nil, nil
+	}
+
+	specBytes := []byte(val)
+	sucSpec := AutonomousContainerDatabaseSpec{}
+
+	err := json.Unmarshal(specBytes, &sucSpec)
+	if err != nil {
+		return nil, err
+	}
+
+	return &sucSpec, nil
+}
+
+func (acd *AutonomousContainerDatabase) UpdateLastSuccessfulSpec() error {
+	specBytes, err := json.Marshal(acd.Spec)
+	if err != nil {
+		return err
+	}
+
+	anns := acd.GetAnnotations()
+
+	if anns == nil {
+		anns = map[string]string{
+			LastSuccessfulSpec: string(specBytes),
+		}
+	} else {
+		anns[LastSuccessfulSpec] = string(specBytes)
+	}
+
+	acd.SetAnnotations(anns)
+
+	return nil
+}
+
+// UpdateStatusFromOCIACD updates the status subresource
+func (acd *AutonomousContainerDatabase) UpdateStatusFromOciAcd(ociObj database.AutonomousContainerDatabase) {
+	acd.Status.LifecycleState = ociObj.LifecycleState
+	acd.Status.TimeCreated = dbv4.FormatSDKTime(ociObj.TimeCreated)
+}
+
+// UpdateFromOCIADB updates the attributes using database.AutonomousContainerDatabase object
+func (acd *AutonomousContainerDatabase) UpdateFromOciAcd(ociObj database.AutonomousContainerDatabase) (specChanged bool) {
+	oldACD := acd.DeepCopy()
+
+	/***********************************
+	* update the spec
+	***********************************/
+	acd.Spec.Action = AcdActionBlank
+	acd.Spec.AutonomousContainerDatabaseOCID = ociObj.Id
+	acd.Spec.CompartmentOCID = ociObj.CompartmentId
+	acd.Spec.DisplayName = ociObj.DisplayName
+	acd.Spec.AutonomousExadataVMClusterOCID = ociObj.CloudAutonomousVmClusterId
+	acd.Spec.PatchModel = ociObj.PatchModel
+
+	// special case: an empty map will be nil after unmarshalling while the OCI always returns an empty map.
+ if len(ociObj.FreeformTags) != 0 { + acd.Spec.FreeformTags = ociObj.FreeformTags + } else { + acd.Spec.FreeformTags = nil + } + + /*********************************** + * update the status subresource + ***********************************/ + acd.UpdateStatusFromOciAcd(ociObj) + + return !reflect.DeepEqual(oldACD.Spec, acd.Spec) +} + +// RemoveUnchangedSpec removes the unchanged fields in spec, and returns if the spec has been changed. +func (acd *AutonomousContainerDatabase) RemoveUnchangedSpec(prevSpec AutonomousContainerDatabaseSpec) (bool, error) { + changed, err := dbv4.RemoveUnchangedFields(prevSpec, &acd.Spec) + if err != nil { + return changed, err + } + + return changed, nil +} + +// A helper function which is useful for debugging. The function prints out a structural JSON format. +func (acd *AutonomousContainerDatabase) String() (string, error) { + out, err := json.MarshalIndent(acd, "", " ") + if err != nil { + return "", err + } + return string(out), nil +} diff --git a/apis/database/v1alpha1/autonomouscontainerdatabase_webhook.go b/apis/database/v1alpha1/autonomouscontainerdatabase_webhook.go new file mode 100644 index 00000000..10a16cd1 --- /dev/null +++ b/apis/database/v1alpha1/autonomouscontainerdatabase_webhook.go @@ -0,0 +1,111 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomouscontainerdatabaselog = logf.Log.WithName("autonomouscontainerdatabase-resource") + +func (r *AutonomousContainerDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomouscontainerdatabases,versions=v1alpha1,name=vautonomouscontainerdatabasev1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousContainerDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateCreate() (admission.Warnings, error) { + autonomouscontainerdatabaselog.Info("validate create", "name", r.Name) + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + var oldACD *AutonomousContainerDatabase = old.(*AutonomousContainerDatabase) + + autonomouscontainerdatabaselog.Info("validate update", "name", r.Name) + + // skip the update of adding ADB OCID or binding + if oldACD.Status.LifecycleState == "" { + return nil, nil + } + + // cannot update when the old state is in intermediate 
state, except for the terminate operation
+	var copiedSpec *AutonomousContainerDatabaseSpec = r.Spec.DeepCopy()
+	changed, err := dbv4.RemoveUnchangedFields(oldACD.Spec, copiedSpec)
+	if err != nil {
+		allErrs = append(allErrs,
+			field.Forbidden(field.NewPath("spec"), err.Error()))
+	}
+	if dbv4.IsACDIntermediateState(oldACD.Status.LifecycleState) && changed {
+		allErrs = append(allErrs,
+			field.Forbidden(field.NewPath("spec"),
+				"cannot change the spec when the lifecycleState is in an intermdeiate state"))
+	}
+
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousContainerDatabase"},
+		r.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (r *AutonomousContainerDatabase) ValidateDelete() (admission.Warnings, error) {
+	autonomouscontainerdatabaselog.Info("validate delete", "name", r.Name)
+
+	// TODO(user): fill in your validation logic upon object deletion.
+	return nil, nil
+}
diff --git a/apis/database/v1alpha1/autonomouscontainerdatabase_webhook_test.go b/apis/database/v1alpha1/autonomouscontainerdatabase_webhook_test.go
new file mode 100644
index 00000000..668a575d
--- /dev/null
+++ b/apis/database/v1alpha1/autonomouscontainerdatabase_webhook_test.go
@@ -0,0 +1,119 @@
+/*
+** Copyright (c) 2022 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v1alpha1 + +import ( + "context" + "encoding/json" + "time" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + // +kubebuilder:scaffold:imports +) + +var _ = Describe("test AutonomousContainerDatabase webhook", func() { + Describe("Test ValidateUpdate of the AutonomousContainerDatabase validating webhook", func() { + var ( + resourceName = "testacd" + namespace = "default" + acdLookupKey = types.NamespacedName{Name: resourceName, Namespace: namespace} + + acd *AutonomousContainerDatabase + + timeout = time.Second * 5 + ) + + BeforeEach(func() { + acd = &AutonomousContainerDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousContainerDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: namespace, + }, + Spec: AutonomousContainerDatabaseSpec{ + AutonomousContainerDatabaseOCID: common.String("fake-acd-ocid"), + CompartmentOCID: common.String("fake-compartment-ocid"), + DisplayName: common.String("fake-displayName"), + AutonomousExadataVMClusterOCID: common.String("fake-vmcluster-ocid"), + PatchModel: database.AutonomousContainerDatabasePatchModelUpdates, + }, + } + + specBytes, err := json.Marshal(acd.Spec) + Expect(err).To(BeNil()) + + anns := map[string]string{ + LastSuccessfulSpec: string(specBytes), + } + acd.SetAnnotations(anns) + + Expect(k8sClient.Create(context.TODO(), acd)).To(Succeed()) + + // Change the lifecycleState to AVAILABLE + acd.Status.LifecycleState = database.AutonomousContainerDatabaseLifecycleStateAvailable + Expect(k8sClient.Status().Update(context.TODO(), acd)).To(Succeed()) + + // Make sure the object is created + Eventually(func() error { + createdACD := &AutonomousContainerDatabase{} + return k8sClient.Get(context.TODO(), acdLookupKey, createdACD) + }, timeout).Should(BeNil()) + }) + + AfterEach(func() { + 
Expect(k8sClient.Delete(context.TODO(), acd)).To(Succeed()) + }) + + It("Cannot change the spec when the lifecycleState is in an intermdeiate state", func() { + var errMsg string = "cannot change the spec when the lifecycleState is in an intermdeiate state" + + acd.Status.LifecycleState = database.AutonomousContainerDatabaseLifecycleStateProvisioning + Expect(k8sClient.Status().Update(context.TODO(), acd)).To(Succeed()) + + acd.Spec.DisplayName = common.String("modified-display-name") + + validateInvalidTest(acd, true, errMsg) + }) + }) +}) diff --git a/apis/database/v1alpha1/autonomousdatabase_conversion.go b/apis/database/v1alpha1/autonomousdatabase_conversion.go new file mode 100644 index 00000000..ffccc181 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabase_conversion.go @@ -0,0 +1,371 @@ +package v1alpha1 + +import ( + "errors" + + v4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AutonomousDatabase to the Hub version (v4). 
+func (src *AutonomousDatabase) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousDatabase) + // Convert the Spec + dst.Spec.Action = src.Spec.Action + + // Details + dst.Spec.Details.Id = src.Spec.Details.Id + dst.Spec.Details.CompartmentId = src.Spec.Details.CompartmentId + dst.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name + dst.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Details.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Details.DisplayName = src.Spec.Details.DisplayName + dst.Spec.Details.DbName = src.Spec.Details.DbName + dst.Spec.Details.DbWorkload = src.Spec.Details.DbWorkload + dst.Spec.Details.LicenseModel = src.Spec.Details.LicenseModel + dst.Spec.Details.DbVersion = src.Spec.Details.DbVersion + dst.Spec.Details.DataStorageSizeInTBs = src.Spec.Details.DataStorageSizeInTBs + dst.Spec.Details.CpuCoreCount = src.Spec.Details.CpuCoreCount + dst.Spec.Details.ComputeModel = src.Spec.Details.ComputeModel + dst.Spec.Details.ComputeCount = src.Spec.Details.ComputeCount + dst.Spec.Details.OcpuCount = src.Spec.Details.OcpuCount + dst.Spec.Details.AdminPassword.K8sSecret.Name = src.Spec.Details.AdminPassword.K8sSecret.Name + dst.Spec.Details.AdminPassword.OciSecret.Id = src.Spec.Details.AdminPassword.OciSecret.Id + dst.Spec.Details.IsAutoScalingEnabled = src.Spec.Details.IsAutoScalingEnabled + dst.Spec.Details.IsDedicated = src.Spec.Details.IsDedicated + dst.Spec.Details.IsFreeTier = src.Spec.Details.IsFreeTier + dst.Spec.Details.IsAccessControlEnabled = src.Spec.Details.IsAccessControlEnabled + dst.Spec.Details.WhitelistedIps = src.Spec.Details.WhitelistedIps + dst.Spec.Details.SubnetId = src.Spec.Details.SubnetId + dst.Spec.Details.NsgIds = src.Spec.Details.NsgIds + dst.Spec.Details.PrivateEndpointLabel = src.Spec.Details.PrivateEndpointLabel + dst.Spec.Details.IsMtlsConnectionRequired = src.Spec.Details.IsMtlsConnectionRequired + 
dst.Spec.Details.FreeformTags = src.Spec.Details.FreeformTags + + // Clone + dst.Spec.Clone.CompartmentId = src.Spec.Clone.CompartmentId + dst.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name + dst.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Clone.DisplayName = src.Spec.Clone.DisplayName + dst.Spec.Clone.DbName = src.Spec.Clone.DbName + dst.Spec.Clone.DbWorkload = src.Spec.Clone.DbWorkload + dst.Spec.Clone.LicenseModel = src.Spec.Clone.LicenseModel + dst.Spec.Clone.DbVersion = src.Spec.Clone.DbVersion + dst.Spec.Clone.DataStorageSizeInTBs = src.Spec.Clone.DataStorageSizeInTBs + dst.Spec.Clone.CpuCoreCount = src.Spec.Clone.CpuCoreCount + dst.Spec.Clone.ComputeModel = src.Spec.Clone.ComputeModel + dst.Spec.Clone.ComputeCount = src.Spec.Clone.ComputeCount + dst.Spec.Clone.OcpuCount = src.Spec.Clone.OcpuCount + dst.Spec.Clone.AdminPassword.K8sSecret.Name = src.Spec.Clone.AdminPassword.K8sSecret.Name + dst.Spec.Clone.AdminPassword.OciSecret.Id = src.Spec.Clone.AdminPassword.OciSecret.Id + dst.Spec.Clone.IsAutoScalingEnabled = src.Spec.Clone.IsAutoScalingEnabled + dst.Spec.Clone.IsDedicated = src.Spec.Clone.IsDedicated + dst.Spec.Clone.IsFreeTier = src.Spec.Clone.IsFreeTier + dst.Spec.Clone.IsAccessControlEnabled = src.Spec.Clone.IsAccessControlEnabled + dst.Spec.Clone.WhitelistedIps = src.Spec.Clone.WhitelistedIps + dst.Spec.Clone.SubnetId = src.Spec.Clone.SubnetId + dst.Spec.Clone.NsgIds = src.Spec.Clone.NsgIds + dst.Spec.Clone.PrivateEndpointLabel = src.Spec.Clone.PrivateEndpointLabel + dst.Spec.Clone.IsMtlsConnectionRequired = src.Spec.Clone.IsMtlsConnectionRequired + dst.Spec.Clone.FreeformTags = src.Spec.Clone.FreeformTags + dst.Spec.Clone.CloneType = src.Spec.Clone.CloneType + + // Wallet + dst.Spec.Wallet.Name = src.Spec.Wallet.Name + dst.Spec.Wallet.Password.K8sSecret.Name = src.Spec.Wallet.Password.K8sSecret.Name + 
dst.Spec.Wallet.Password.OciSecret.Id = src.Spec.Wallet.Password.OciSecret.Id + + dst.Spec.OciConfig.ConfigMapName = src.Spec.OciConfig.ConfigMapName + dst.Spec.OciConfig.SecretName = src.Spec.OciConfig.SecretName + + dst.Spec.HardLink = src.Spec.HardLink + + // Convert the Status + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + dst.Status.WalletExpiringDate = src.Status.WalletExpiringDate + + // convert status.allConnectionStrings + if src.Status.AllConnectionStrings != nil { + for _, srcProfile := range src.Status.AllConnectionStrings { + dstProfile := v4.ConnectionStringProfile{} + + // convert status.allConnectionStrings[i].tlsAuthentication + if val, ok := v4.GetTLSAuthenticationEnumFromString(string(srcProfile.TLSAuthentication)); !ok { + return errors.New("Unable to convert to TLSAuthenticationEnum: " + string(srcProfile.TLSAuthentication)) + } else { + dstProfile.TLSAuthentication = val + } + + // convert status.allConnectionStrings[i].connectionStrings + dstProfile.ConnectionStrings = make([]v4.ConnectionStringSpec, len(srcProfile.ConnectionStrings)) + for i, v := range srcProfile.ConnectionStrings { + dstProfile.ConnectionStrings[i].TNSName = v.TNSName + dstProfile.ConnectionStrings[i].ConnectionString = v.ConnectionString + } + + dst.Status.AllConnectionStrings = append(dst.Status.AllConnectionStrings, dstProfile) + } + } + + dst.Status.Conditions = src.Status.Conditions + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +// ConvertFrom converts from the Hub version (v4) to v1alpha1 +func (dst *AutonomousDatabase) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousDatabase) + + // Convert the Spec + dst.Spec.Action = src.Spec.Action + + // Details + dst.Spec.Details.Id = src.Spec.Details.Id + dst.Spec.Details.CompartmentId = src.Spec.Details.CompartmentId + dst.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name 
+ dst.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Details.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Details.DisplayName = src.Spec.Details.DisplayName + dst.Spec.Details.DbName = src.Spec.Details.DbName + dst.Spec.Details.DbWorkload = src.Spec.Details.DbWorkload + dst.Spec.Details.LicenseModel = src.Spec.Details.LicenseModel + dst.Spec.Details.DbVersion = src.Spec.Details.DbVersion + dst.Spec.Details.DataStorageSizeInTBs = src.Spec.Details.DataStorageSizeInTBs + dst.Spec.Details.CpuCoreCount = src.Spec.Details.CpuCoreCount + dst.Spec.Details.ComputeModel = src.Spec.Details.ComputeModel + dst.Spec.Details.ComputeCount = src.Spec.Details.ComputeCount + dst.Spec.Details.OcpuCount = src.Spec.Details.OcpuCount + dst.Spec.Details.AdminPassword.K8sSecret.Name = src.Spec.Details.AdminPassword.K8sSecret.Name + dst.Spec.Details.AdminPassword.OciSecret.Id = src.Spec.Details.AdminPassword.OciSecret.Id + dst.Spec.Details.IsAutoScalingEnabled = src.Spec.Details.IsAutoScalingEnabled + dst.Spec.Details.IsDedicated = src.Spec.Details.IsDedicated + dst.Spec.Details.IsFreeTier = src.Spec.Details.IsFreeTier + dst.Spec.Details.IsAccessControlEnabled = src.Spec.Details.IsAccessControlEnabled + dst.Spec.Details.WhitelistedIps = src.Spec.Details.WhitelistedIps + dst.Spec.Details.SubnetId = src.Spec.Details.SubnetId + dst.Spec.Details.NsgIds = src.Spec.Details.NsgIds + dst.Spec.Details.PrivateEndpointLabel = src.Spec.Details.PrivateEndpointLabel + dst.Spec.Details.IsMtlsConnectionRequired = src.Spec.Details.IsMtlsConnectionRequired + dst.Spec.Details.FreeformTags = src.Spec.Details.FreeformTags + + // Clone + dst.Spec.Clone.CompartmentId = src.Spec.Clone.CompartmentId + dst.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name + dst.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Clone.DisplayName = src.Spec.Clone.DisplayName + 
dst.Spec.Clone.DbName = src.Spec.Clone.DbName + dst.Spec.Clone.DbWorkload = src.Spec.Clone.DbWorkload + dst.Spec.Clone.LicenseModel = src.Spec.Clone.LicenseModel + dst.Spec.Clone.DbVersion = src.Spec.Clone.DbVersion + dst.Spec.Clone.DataStorageSizeInTBs = src.Spec.Clone.DataStorageSizeInTBs + dst.Spec.Clone.CpuCoreCount = src.Spec.Clone.CpuCoreCount + dst.Spec.Clone.ComputeModel = src.Spec.Clone.ComputeModel + dst.Spec.Clone.ComputeCount = src.Spec.Clone.ComputeCount + dst.Spec.Clone.OcpuCount = src.Spec.Clone.OcpuCount + dst.Spec.Clone.AdminPassword.K8sSecret.Name = src.Spec.Clone.AdminPassword.K8sSecret.Name + dst.Spec.Clone.AdminPassword.OciSecret.Id = src.Spec.Clone.AdminPassword.OciSecret.Id + dst.Spec.Clone.IsAutoScalingEnabled = src.Spec.Clone.IsAutoScalingEnabled + dst.Spec.Clone.IsDedicated = src.Spec.Clone.IsDedicated + dst.Spec.Clone.IsFreeTier = src.Spec.Clone.IsFreeTier + dst.Spec.Clone.IsAccessControlEnabled = src.Spec.Clone.IsAccessControlEnabled + dst.Spec.Clone.WhitelistedIps = src.Spec.Clone.WhitelistedIps + dst.Spec.Clone.SubnetId = src.Spec.Clone.SubnetId + dst.Spec.Clone.NsgIds = src.Spec.Clone.NsgIds + dst.Spec.Clone.PrivateEndpointLabel = src.Spec.Clone.PrivateEndpointLabel + dst.Spec.Clone.IsMtlsConnectionRequired = src.Spec.Clone.IsMtlsConnectionRequired + dst.Spec.Clone.FreeformTags = src.Spec.Clone.FreeformTags + dst.Spec.Clone.CloneType = src.Spec.Clone.CloneType + + // Wallet + dst.Spec.Wallet.Name = src.Spec.Wallet.Name + dst.Spec.Wallet.Password.K8sSecret.Name = src.Spec.Wallet.Password.K8sSecret.Name + dst.Spec.Wallet.Password.OciSecret.Id = src.Spec.Wallet.Password.OciSecret.Id + + dst.Spec.OciConfig.ConfigMapName = src.Spec.OciConfig.ConfigMapName + dst.Spec.OciConfig.SecretName = src.Spec.OciConfig.SecretName + + dst.Spec.HardLink = src.Spec.HardLink + + // Convert the Status + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + dst.Status.WalletExpiringDate = 
src.Status.WalletExpiringDate + + // convert status.allConnectionStrings + if src.Status.AllConnectionStrings != nil { + for _, srcProfile := range src.Status.AllConnectionStrings { + dstProfile := ConnectionStringProfile{} + + // convert status.allConnectionStrings[i].tlsAuthentication + if val, ok := GetTLSAuthenticationEnumFromString(string(srcProfile.TLSAuthentication)); !ok { + return errors.New("Unable to convert to TLSAuthenticationEnum: " + string(srcProfile.TLSAuthentication)) + } else { + dstProfile.TLSAuthentication = val + } + + // convert status.allConnectionStrings[i].connectionStrings + dstProfile.ConnectionStrings = make([]ConnectionStringSpec, len(srcProfile.ConnectionStrings)) + for i, v := range srcProfile.ConnectionStrings { + dstProfile.ConnectionStrings[i].TNSName = v.TNSName + dstProfile.ConnectionStrings[i].ConnectionString = v.ConnectionString + } + + dst.Status.AllConnectionStrings = append(dst.Status.AllConnectionStrings, dstProfile) + } + } + + dst.Status.Conditions = src.Status.Conditions + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (src *AutonomousDatabaseBackup) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousDatabaseBackup) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.OCID = src.Spec.Target.OciAdb.Ocid + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousDatabaseBackupOCID = src.Spec.AutonomousDatabaseBackupOCID + dst.Spec.IsLongTermBackup = src.Spec.IsLongTermBackup + dst.Spec.RetentionPeriodInDays = src.Spec.RetentionPeriodInDays + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.Type = src.Status.Type + dst.Status.IsAutomatic = src.Status.IsAutomatic + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.AutonomousDatabaseOCID = 
src.Status.AutonomousDatabaseOCID + dst.Status.CompartmentOCID = src.Status.CompartmentOCID + dst.Status.DBName = src.Status.DBName + dst.Status.DBDisplayName = src.Status.DBDisplayName + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (dst *AutonomousDatabaseBackup) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousDatabaseBackup) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.Ocid = src.Spec.Target.OciAdb.OCID + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousDatabaseBackupOCID = src.Spec.AutonomousDatabaseBackupOCID + dst.Spec.IsLongTermBackup = src.Spec.IsLongTermBackup + dst.Spec.RetentionPeriodInDays = src.Spec.RetentionPeriodInDays + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.Type = src.Status.Type + dst.Status.IsAutomatic = src.Status.IsAutomatic + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.AutonomousDatabaseOCID = src.Status.AutonomousDatabaseOCID + dst.Status.CompartmentOCID = src.Status.CompartmentOCID + dst.Status.DBName = src.Status.DBName + dst.Status.DBDisplayName = src.Status.DBDisplayName + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (src *AutonomousDatabaseRestore) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousDatabaseRestore) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.OCID = src.Spec.Target.OciAdb.Ocid + dst.Spec.Source.K8sAdbBackup.Name = src.Spec.Source.K8sAdbBackup.Name + dst.Spec.Source.PointInTime.Timestamp = src.Spec.Source.PointInTime.Timestamp + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.DisplayName = src.Status.DisplayName + 
dst.Status.TimeAccepted = src.Status.TimeAccepted + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.DbName = src.Status.DbName + dst.Status.WorkRequestOCID = src.Status.WorkRequestOCID + dst.Status.Status = src.Status.Status + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (dst *AutonomousDatabaseRestore) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousDatabaseRestore) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.Ocid = src.Spec.Target.OciAdb.OCID + dst.Spec.Source.K8sAdbBackup.Name = src.Spec.Source.K8sAdbBackup.Name + dst.Spec.Source.PointInTime.Timestamp = src.Spec.Source.PointInTime.Timestamp + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.DisplayName = src.Status.DisplayName + dst.Status.TimeAccepted = src.Status.TimeAccepted + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.DbName = src.Status.DbName + dst.Status.WorkRequestOCID = src.Status.WorkRequestOCID + dst.Status.Status = src.Status.Status + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (src *AutonomousContainerDatabase) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousContainerDatabase) + + dst.Spec.AutonomousContainerDatabaseOCID = src.Spec.AutonomousContainerDatabaseOCID + dst.Spec.CompartmentOCID = src.Spec.CompartmentOCID + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousExadataVMClusterOCID = src.Spec.AutonomousExadataVMClusterOCID + dst.Spec.PatchModel = src.Spec.PatchModel + + if val, ok := v4.GetAcdActionEnumFromString(string(src.Spec.Action)); !ok { + return errors.New("Unable to convert to AcdActionEnum: " + string(src.Spec.Action)) + } else { + dst.Spec.Action = val + } + + dst.Spec.FreeformTags = src.Spec.FreeformTags + 
dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + dst.Spec.HardLink = src.Spec.HardLink + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (dst *AutonomousContainerDatabase) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousContainerDatabase) + + dst.Spec.AutonomousContainerDatabaseOCID = src.Spec.AutonomousContainerDatabaseOCID + dst.Spec.CompartmentOCID = src.Spec.CompartmentOCID + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousExadataVMClusterOCID = src.Spec.AutonomousExadataVMClusterOCID + dst.Spec.PatchModel = src.Spec.PatchModel + + if val, ok := GetAcdActionEnumFromString(string(src.Spec.Action)); !ok { + return errors.New("Unable to convert to AcdActionEnum: " + string(src.Spec.Action)) + } else { + dst.Spec.Action = val + } + + dst.Spec.FreeformTags = src.Spec.FreeformTags + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + dst.Spec.HardLink = src.Spec.HardLink + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + + dst.ObjectMeta = src.ObjectMeta + return nil +} diff --git a/apis/database/v1alpha1/autonomousdatabase_types.go b/apis/database/v1alpha1/autonomousdatabase_types.go index 4916ad46..099703c2 100644 --- a/apis/database/v1alpha1/autonomousdatabase_types.go +++ b/apis/database/v1alpha1/autonomousdatabase_types.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -39,14 +39,8 @@ package v1alpha1 import ( - "encoding/json" - "strconv" - - "github.com/oracle/oci-go-sdk/v45/database" - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/oracle/oracle-database-operator/commons/annotations" + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! @@ -55,170 +49,172 @@ import ( // AutonomousDatabaseSpec defines the desired state of AutonomousDatabase // Important: Run "make" to regenerate code after modifying this file type AutonomousDatabaseSpec struct { - Details AutonomousDatabaseDetails `json:"details"` - OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + // +kubebuilder:validation:Enum:="";Create;Sync;Update;Stop;Start;Terminate;Clone + Action string `json:"action"` + Details AutonomousDatabaseDetails `json:"details,omitempty"` + Clone AutonomousDatabaseClone `json:"clone,omitempty"` + Wallet WalletSpec `json:"wallet,omitempty"` + OciConfig OciConfigSpec `json:"ociConfig,omitempty"` // +kubebuilder:default:=false HardLink *bool `json:"hardLink,omitempty"` } -type OCIConfigSpec struct { - ConfigMapName *string `json:"configMapName,omitempty"` - SecretName *string `json:"secretName,omitempty"` +type AutonomousDatabaseDetails struct { + AutonomousDatabaseBase `json:",inline"` + Id *string `json:"id,omitempty"` } -// AutonomousDatabaseDetails defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase -type AutonomousDatabaseDetails struct { - AutonomousDatabaseOCID *string `json:"autonomousDatabaseOCID,omitempty"` - CompartmentOCID *string `json:"compartmentOCID,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - DbName *string `json:"dbName,omitempty"` - // +kubebuilder:validation:Enum:=OLTP;DW;AJD;APEX - DbWorkload 
database.AutonomousDatabaseDbWorkloadEnum `json:"dbWorkload,omitempty"` - IsDedicated *bool `json:"isDedicated,omitempty"` - DbVersion *string `json:"dbVersion,omitempty"` - DataStorageSizeInTBs *int `json:"dataStorageSizeInTBs,omitempty"` - CPUCoreCount *int `json:"cpuCoreCount,omitempty"` - AdminPassword PasswordSpec `json:"adminPassword,omitempty"` - IsAutoScalingEnabled *bool `json:"isAutoScalingEnabled,omitempty"` - LifecycleState database.AutonomousDatabaseLifecycleStateEnum `json:"lifecycleState,omitempty"` - SubnetOCID *string `json:"subnetOCID,omitempty"` - NsgOCIDs []string `json:"nsgOCIDs,omitempty"` - PrivateEndpoint *string `json:"privateEndpoint,omitempty"` - PrivateEndpointLabel *string `json:"privateEndpointLabel,omitempty"` - PrivateEndpointIP *string `json:"privateEndpointIP,omitempty"` - FreeformTags map[string]string `json:"freeformTags,omitempty"` - Wallet WalletSpec `json:"wallet,omitempty"` +type AutonomousDatabaseClone struct { + AutonomousDatabaseBase `json:",inline"` + // +kubebuilder:validation:Enum:="FULL";"METADATA" + CloneType database.CreateAutonomousDatabaseCloneDetailsCloneTypeEnum `json:"cloneType,omitempty"` } -type WalletSpec struct { - Name *string `json:"name,omitempty"` - Password PasswordSpec `json:"password,omitempty"` +// AutonomousDatabaseBase defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase +type AutonomousDatabaseBase struct { + CompartmentId *string `json:"compartmentId,omitempty"` + AutonomousContainerDatabase AcdSpec `json:"autonomousContainerDatabase,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + DbName *string `json:"dbName,omitempty"` + // +kubebuilder:validation:Enum:="OLTP";"DW";"AJD";"APEX" + DbWorkload database.AutonomousDatabaseDbWorkloadEnum `json:"dbWorkload,omitempty"` + // +kubebuilder:validation:Enum:="LICENSE_INCLUDED";"BRING_YOUR_OWN_LICENSE" + LicenseModel database.AutonomousDatabaseLicenseModelEnum 
`json:"licenseModel,omitempty"` + DbVersion *string `json:"dbVersion,omitempty"` + DataStorageSizeInTBs *int `json:"dataStorageSizeInTBs,omitempty"` + CpuCoreCount *int `json:"cpuCoreCount,omitempty"` + // +kubebuilder:validation:Enum:="ECPU";"OCPU" + ComputeModel database.AutonomousDatabaseComputeModelEnum `json:"computeModel,omitempty"` + ComputeCount *float32 `json:"computeCount,omitempty"` + OcpuCount *float32 `json:"ocpuCount,omitempty"` + AdminPassword PasswordSpec `json:"adminPassword,omitempty"` + IsAutoScalingEnabled *bool `json:"isAutoScalingEnabled,omitempty"` + IsDedicated *bool `json:"isDedicated,omitempty"` + IsFreeTier *bool `json:"isFreeTier,omitempty"` + + // NetworkAccess + IsAccessControlEnabled *bool `json:"isAccessControlEnabled,omitempty"` + WhitelistedIps []string `json:"whitelistedIps,omitempty"` + SubnetId *string `json:"subnetId,omitempty"` + NsgIds []string `json:"nsgIds,omitempty"` + PrivateEndpointLabel *string `json:"privateEndpointLabel,omitempty"` + IsMtlsConnectionRequired *bool `json:"isMtlsConnectionRequired,omitempty"` + + FreeformTags map[string]string `json:"freeformTags,omitempty"` +} + +/************************ +* ACD specs +************************/ +type K8sAcdSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciAcdSpec struct { + Id *string `json:"id,omitempty"` +} + +// AcdSpec defines the spec of the target for backup/restore runs. 
+// The name could be the name of an AutonomousDatabase or an AutonomousDatabaseBackup +type AcdSpec struct { + K8sAcd K8sAcdSpec `json:"k8sAcd,omitempty"` + OciAcd OciAcdSpec `json:"ociAcd,omitempty"` +} + +/************************ +* Secret specs +************************/ +type K8sSecretSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciSecretSpec struct { + Id *string `json:"id,omitempty"` } type PasswordSpec struct { - K8sSecretName *string `json:"k8sSecretName,omitempty"` - OCISecretOCID *string `json:"ociSecretOCID,omitempty"` + K8sSecret K8sSecretSpec `json:"k8sSecret,omitempty"` + OciSecret OciSecretSpec `json:"ociSecret,omitempty"` +} + +type WalletSpec struct { + Name *string `json:"name,omitempty"` + Password PasswordSpec `json:"password,omitempty"` } // AutonomousDatabaseStatus defines the observed state of AutonomousDatabase type AutonomousDatabaseStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - DisplayName string `json:"displayName,omitempty"` - LifecycleState database.AutonomousDatabaseLifecycleStateEnum `json:"lifecycleState,omitempty"` - IsDedicated string `json:"isDedicated,omitempty"` - CPUCoreCount int `json:"cpuCoreCount,omitempty"` - DataStorageSizeInTBs int `json:"dataStorageSizeInTBs,omitempty"` - DbWorkload database.AutonomousDatabaseDbWorkloadEnum `json:"dbWorkload,omitempty"` - TimeCreated string `json:"timeCreated,omitempty"` + // Lifecycle State of the ADB + LifecycleState database.AutonomousDatabaseLifecycleStateEnum `json:"lifecycleState,omitempty"` + // Creation time of the ADB + TimeCreated string `json:"timeCreated,omitempty"` + // Expiring date of the instance wallet + WalletExpiringDate string `json:"walletExpiringDate,omitempty"` + // Connection Strings of the ADB + AllConnectionStrings []ConnectionStringProfile `json:"allConnectionStrings,omitempty"` + // +patchMergeKey=type + // +patchStrategy=merge 
+ // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type TLSAuthenticationEnum string + +const ( + tlsAuthenticationTLS TLSAuthenticationEnum = "TLS" + tlsAuthenticationMTLS TLSAuthenticationEnum = "Mutual TLS" +) + +func GetTLSAuthenticationEnumFromString(val string) (TLSAuthenticationEnum, bool) { + var mappingTLSAuthenticationEnum = map[string]TLSAuthenticationEnum{ + "TLS": tlsAuthenticationTLS, + "Mutual TLS": tlsAuthenticationMTLS, + } + + enum, ok := mappingTLSAuthenticationEnum[val] + return enum, ok +} + +type ConnectionStringProfile struct { + TLSAuthentication TLSAuthenticationEnum `json:"tlsAuthentication,omitempty"` + ConnectionStrings []ConnectionStringSpec `json:"connectionStrings"` +} + +type ConnectionStringSpec struct { + TNSName string `json:"tnsName,omitempty"` + ConnectionString string `json:"connectionString,omitempty"` } // AutonomousDatabase is the Schema for the autonomousdatabases API // +kubebuilder:object:root=true // +kubebuilder:resource:shortName="adb";"adbs" // +kubebuilder:subresource:status -// +kubebuilder:printcolumn:JSONPath=".status.displayName",name="Display Name",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.displayName",name="Display Name",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.dbName",name="Db Name",type=string // +kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string -// +kubebuilder:printcolumn:JSONPath=".status.isDedicated",name="Dedicated",type=string -// +kubebuilder:printcolumn:JSONPath=".status.cpuCoreCount",name="OCPUs",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.dataStorageSizeInTBs",name="Storage (TB)",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.dbWorkload",name="Workload Type",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.isDedicated",name="Dedicated",type=string +// 
+kubebuilder:printcolumn:JSONPath=".spec.details.cpuCoreCount",name="OCPUs",type=integer +// +kubebuilder:printcolumn:JSONPath=".spec.details.dataStorageSizeInTBs",name="Storage (TB)",type=integer +// +kubebuilder:printcolumn:JSONPath=".spec.details.dbWorkload",name="Workload Type",type=string // +kubebuilder:printcolumn:JSONPath=".status.timeCreated",name="Created",type=string type AutonomousDatabase struct { - metaV1.TypeMeta `json:",inline"` - metaV1.ObjectMeta `json:"metadata,omitempty"` + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` Spec AutonomousDatabaseSpec `json:"spec,omitempty"` Status AutonomousDatabaseStatus `json:"status,omitempty"` } -// LastSuccessfulSpec is an annotation key which maps to the value of last successful spec -const LastSuccessfulSpec string = "lastSuccessfulSpec" - -// GetLastSuccessfulSpec returns spec from the lass successful reconciliation. -// Returns nil, nil if there is no lastSuccessfulSpec. -func (adb *AutonomousDatabase) GetLastSuccessfulSpec() (*AutonomousDatabaseSpec, error) { - val, ok := adb.GetAnnotations()[LastSuccessfulSpec] - if !ok { - return nil, nil - } - - specBytes := []byte(val) - sucSpec := AutonomousDatabaseSpec{} - - err := json.Unmarshal(specBytes, &sucSpec) - if err != nil { - return nil, err - } - - return &sucSpec, nil -} - -// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. 
-func (adb *AutonomousDatabase) UpdateLastSuccessfulSpec(kubeClient client.Client) error { - specBytes, err := json.Marshal(adb.Spec) - if err != nil { - return err - } - - anns := map[string]string{ - LastSuccessfulSpec: string(specBytes), - } - - return annotations.SetAnnotations(kubeClient, adb, anns) -} - -// UpdateAttrFromOCIAutonomousDatabase updates the attributes from database.AutonomousDatabase object and returns the resource -func (adb *AutonomousDatabase) UpdateAttrFromOCIAutonomousDatabase(ociObj database.AutonomousDatabase) *AutonomousDatabase { - adb.Spec.Details.AutonomousDatabaseOCID = ociObj.Id - adb.Spec.Details.CompartmentOCID = ociObj.CompartmentId - adb.Spec.Details.DisplayName = ociObj.DisplayName - adb.Spec.Details.DbName = ociObj.DbName - adb.Spec.Details.DbWorkload = ociObj.DbWorkload - adb.Spec.Details.IsDedicated = ociObj.IsDedicated - adb.Spec.Details.DbVersion = ociObj.DbVersion - adb.Spec.Details.DataStorageSizeInTBs = ociObj.DataStorageSizeInTBs - adb.Spec.Details.CPUCoreCount = ociObj.CpuCoreCount - adb.Spec.Details.IsAutoScalingEnabled = ociObj.IsAutoScalingEnabled - adb.Spec.Details.LifecycleState = ociObj.LifecycleState - adb.Spec.Details.FreeformTags = ociObj.FreeformTags - - adb.Spec.Details.SubnetOCID = ociObj.SubnetId - adb.Spec.Details.NsgOCIDs = ociObj.NsgIds - adb.Spec.Details.PrivateEndpoint = ociObj.PrivateEndpoint - adb.Spec.Details.PrivateEndpointLabel = ociObj.PrivateEndpointLabel - adb.Spec.Details.PrivateEndpointIP = ociObj.PrivateEndpointIp - - // update the subresource as well - adb.Status.DisplayName = *ociObj.DisplayName - adb.Status.LifecycleState = ociObj.LifecycleState - adb.Status.IsDedicated = strconv.FormatBool(*ociObj.IsDedicated) - adb.Status.CPUCoreCount = *ociObj.CpuCoreCount - adb.Status.DataStorageSizeInTBs = *ociObj.DataStorageSizeInTBs - adb.Status.DbWorkload = ociObj.DbWorkload - adb.Status.TimeCreated = ociObj.TimeCreated.String() - - return adb -} - // +kubebuilder:object:root=true // 
AutonomousDatabaseList contains a list of AutonomousDatabase type AutonomousDatabaseList struct { - metaV1.TypeMeta `json:",inline"` - metaV1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` Items []AutonomousDatabase `json:"items"` } func init() { SchemeBuilder.Register(&AutonomousDatabase{}, &AutonomousDatabaseList{}) } - -// A helper function which is useful for debugging. The function prints out a structural JSON format. -func (adb *AutonomousDatabase) String() (string, error) { - out, err := json.MarshalIndent(adb, "", " ") - if err != nil { - return "", err - } - return string(out), nil -} diff --git a/apis/database/v1alpha1/autonomousdatabase_webhook.go b/apis/database/v1alpha1/autonomousdatabase_webhook.go new file mode 100644 index 00000000..e209ae7a --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabase_webhook.go @@ -0,0 +1,171 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for 
sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabaselog = logf.Log.WithName("autonomousdatabase-resource") + +func (r *AutonomousDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabases,versions=v1alpha1,name=vautonomousdatabasev1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +// ValidateCreate checks if the spec is valid for a provisioning or a binding operation +func (r *AutonomousDatabase) ValidateCreate() (admission.Warnings, error) { + var allErrs field.ErrorList + + autonomousdatabaselog.Info("validate create", "name", r.Name) + + namespaces := dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabase"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + var oldAdb *AutonomousDatabase = old.(*AutonomousDatabase) + + autonomousdatabaselog.Info("validate update", "name", r.Name) + + // skip the update of adding ADB OCID or binding + // if oldAdb.Status.LifecycleState == "" { + // return nil, nil + // } + + // cannot update when the old state is in intermediate, except for the change to the 
hardLink or the terminate operatrion during valid lifecycleState + // var copySpec *AutonomousDatabaseSpec = r.Spec.DeepCopy() + // specChanged, err := dbv4.RemoveUnchangedFields(oldAdb.Spec, copySpec) + // if err != nil { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), err.Error())) + // } + + // hardLinkChanged := copySpec.HardLink != nil + + // isTerminateOp := dbv4.CanBeTerminated(oldAdb.Status.LifecycleState) && copySpec.Action == "Terminate" + + // if specChanged && dbv4.IsAdbIntermediateState(oldAdb.Status.LifecycleState) && !isTerminateOp && !hardLinkChanged { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), + // "cannot change the spec when the lifecycleState is in an intermdeiate state")) + // } + + // cannot modify autonomousDatabaseOCID + if r.Spec.Details.Id != nil && + oldAdb.Spec.Details.Id != nil && + *r.Spec.Details.Id != *oldAdb.Spec.Details.Id { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("autonomousDatabaseOCID"), + "autonomousDatabaseOCID cannot be modified")) + } + + allErrs = validateCommon(r, allErrs) + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabase"}, + r.Name, allErrs) +} + +func validateCommon(adb *AutonomousDatabase, allErrs field.ErrorList) field.ErrorList { + // password + if adb.Spec.Details.AdminPassword.K8sSecret.Name != nil && adb.Spec.Details.AdminPassword.OciSecret.Id != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("adminPassword"), + "cannot apply k8sSecret.name and ociSecret.ocid at the same time")) + } + + if adb.Spec.Wallet.Password.K8sSecret.Name != nil && adb.Spec.Wallet.Password.OciSecret.Id != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("wallet").Child("password"), + "cannot apply k8sSecret.name and 
ociSecret.ocid at the same time")) + } + + return allErrs +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabase) ValidateDelete() (admission.Warnings, error) { + autonomousdatabaselog.Info("validate delete", "name", r.Name) + return nil, nil +} + +// Returns true if AutonomousContainerDatabaseOCID has value. +// We don't use Details.IsDedicated because the parameter might be null when it's a provision operation. +func isDedicated(adb *AutonomousDatabase) bool { + return adb.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name != nil || + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id != nil +} diff --git a/apis/database/v1alpha1/autonomousdatabase_webhook_test.go b/apis/database/v1alpha1/autonomousdatabase_webhook_test.go new file mode 100644 index 00000000..8949f8f4 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabase_webhook_test.go @@ -0,0 +1,216 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v1alpha1 + +import ( + "context" + "encoding/json" + "time" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + // +kubebuilder:scaffold:imports +) + +var _ = Describe("test AutonomousDatabase webhook", func() { + Describe("Test ValidateCreate of the AutonomousDatabase validating webhook", func() { + var ( + resourceName = "testadb" + namespace = "default" + + adb *AutonomousDatabase + ) + + BeforeEach(func() { + adb = &AutonomousDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: namespace, + }, + Spec: AutonomousDatabaseSpec{ + Details: AutonomousDatabaseDetails{ + AutonomousDatabaseBase: AutonomousDatabaseBase{ + CompartmentId: common.String("fake-compartment-ocid"), + DbName: common.String("fake-dbName"), + DisplayName: common.String("fake-displayName"), + CpuCoreCount: common.Int(1), + AdminPassword: PasswordSpec{ + K8sSecret: K8sSecretSpec{ + Name: common.String("fake-admin-password"), + }, + }, + DataStorageSizeInTBs: common.Int(1), + }, + }, + }, + } + }) + + // Common validation + It("Should not apply values to adminPassword.k8sSecret and adminPassword.ociSecret at the same time", func() { + var errMsg string = "cannot apply k8sSecret.name and ociSecret.ocid at the same time" + + adb.Spec.Details.AdminPassword.K8sSecret.Name = common.String("test-admin-password") + adb.Spec.Details.AdminPassword.OciSecret.Id = common.String("fake.ocid1.vaultsecret.oc1...") + + validateInvalidTest(adb, false, errMsg) + }) + + It("Should not apply values to wallet.password.k8sSecret and wallet.password.ociSecret at the same time", func() { + var errMsg string = "cannot apply k8sSecret.name and ociSecret.ocid at the same time" + + adb.Spec.Wallet.Password.K8sSecret.Name = 
common.String("test-wallet-password") + adb.Spec.Wallet.Password.OciSecret.Id = common.String("fake.ocid1.vaultsecret.oc1...") + + validateInvalidTest(adb, false, errMsg) + }) + + Context("Dedicated Autonomous Database", func() { + BeforeEach(func() { + adb.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name = common.String("testACD") + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = common.String("fake-acd-ocid") + }) + + It("AccessControlList cannot be empty when the network access type is RESTRICTED", func() { + var errMsg string = "access control list cannot be provided when Autonomous Database's access control is disabled" + + adb.Spec.Details.IsAccessControlEnabled = common.Bool(false) + adb.Spec.Details.WhitelistedIps = []string{"192.168.1.1"} + + validateInvalidTest(adb, false, errMsg) + }) + + It("AccessControlList cannot be empty when the network access type is RESTRICTED", func() { + var errMsg string = "isMTLSConnectionRequired is not supported on a dedicated database" + + adb.Spec.Details.IsMtlsConnectionRequired = common.Bool(true) + + validateInvalidTest(adb, false, errMsg) + }) + + }) + }) + + // Skip the common and network validations since they're already verified in the test for ValidateCreate + Describe("Test ValidateUpdate of the AutonomousDatabase validating webhook", func() { + var ( + resourceName = "testadb" + namespace = "default" + adbLookupKey = types.NamespacedName{Name: resourceName, Namespace: namespace} + + adb *AutonomousDatabase + + timeout = time.Second * 5 + ) + + BeforeEach(func() { + adb = &AutonomousDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: namespace, + }, + Spec: AutonomousDatabaseSpec{ + Action: "Create", + Details: AutonomousDatabaseDetails{ + Id: common.String("fake-adb-ocid"), + AutonomousDatabaseBase: AutonomousDatabaseBase{ + CompartmentId: 
common.String("fake-compartment-ocid"), + DbName: common.String("fake-dbName"), + DisplayName: common.String("fake-displayName"), + CpuCoreCount: common.Int(1), + DataStorageSizeInTBs: common.Int(1), + }, + }, + }, + } + + specBytes, err := json.Marshal(adb.Spec) + Expect(err).To(BeNil()) + + anns := map[string]string{ + LastSuccessfulSpec: string(specBytes), + } + adb.SetAnnotations(anns) + + Expect(k8sClient.Create(context.TODO(), adb)).To(Succeed()) + + // Change the lifecycleState to AVAILABLE + adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateAvailable + Expect(k8sClient.Status().Update(context.TODO(), adb)).To(Succeed()) + + // Make sure the object is created + Eventually(func() error { + createdADB := &AutonomousDatabase{} + return k8sClient.Get(context.TODO(), adbLookupKey, createdADB) + }, timeout).Should(BeNil()) + }) + + AfterEach(func() { + Expect(k8sClient.Delete(context.TODO(), adb)).To(Succeed()) + }) + + It("Cannot change the spec when the lifecycleState is in an intermdeiate state", func() { + var errMsg string = "cannot change the spec when the lifecycleState is in an intermdeiate state" + + adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateUpdating + Expect(k8sClient.Status().Update(context.TODO(), adb)).To(Succeed()) + + adb.Spec.Details.DbName = common.String("modified-db-name") + + validateInvalidTest(adb, true, errMsg) + }) + + It("AutonomousDatabaseOCID cannot be modified", func() { + var errMsg string = "autonomousDatabaseOCID cannot be modified" + + adb.Spec.Details.Id = common.String("modified-adb-ocid") + + validateInvalidTest(adb, true, errMsg) + }) + }) +}) diff --git a/apis/database/v1alpha1/autonomousdatabasebackup_types.go b/apis/database/v1alpha1/autonomousdatabasebackup_types.go new file mode 100644 index 00000000..aa70c2d5 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabasebackup_types.go @@ -0,0 +1,126 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// AutonomousDatabaseBackupSpec defines the desired state of AutonomousDatabaseBackup +type AutonomousDatabaseBackupSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Target TargetSpec `json:"target,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + AutonomousDatabaseBackupOCID *string `json:"autonomousDatabaseBackupOCID,omitempty"` + IsLongTermBackup *bool `json:"isLongTermBackup,omitempty"` + RetentionPeriodInDays *int `json:"retentionPeriodInDays,omitempty"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` +} + +// AutonomousDatabaseBackupStatus defines the observed state of AutonomousDatabaseBackup +type AutonomousDatabaseBackupStatus struct { + LifecycleState database.AutonomousDatabaseBackupLifecycleStateEnum `json:"lifecycleState"` + Type database.AutonomousDatabaseBackupTypeEnum `json:"type"` + IsAutomatic bool `json:"isAutomatic"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeEnded string `json:"timeEnded,omitempty"` + AutonomousDatabaseOCID string `json:"autonomousDatabaseOCID"` + CompartmentOCID string `json:"compartmentOCID"` + DBName string `json:"dbName"` + DBDisplayName string `json:"dbDisplayName"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="adbbu";"adbbus" +//+kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string +//+kubebuilder:printcolumn:JSONPath=".status.dbDisplayName",name="DB DisplayName",type=string 
+//+kubebuilder:printcolumn:JSONPath=".status.type",name="Type",type=string +//+kubebuilder:printcolumn:JSONPath=".status.timeStarted",name="Started",type=string +//+kubebuilder:printcolumn:JSONPath=".status.timeEnded",name="Ended",type=string + +// AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups API +type AutonomousDatabaseBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousDatabaseBackupSpec `json:"spec,omitempty"` + Status AutonomousDatabaseBackupStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousDatabaseBackupList contains a list of AutonomousDatabaseBackup +type AutonomousDatabaseBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabaseBackup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabaseBackup{}, &AutonomousDatabaseBackupList{}) +} + +func (b *AutonomousDatabaseBackup) UpdateStatusFromOCIBackup(ociBackup database.AutonomousDatabaseBackup, ociAdb database.AutonomousDatabase) { + b.Status.AutonomousDatabaseOCID = *ociBackup.AutonomousDatabaseId + b.Status.CompartmentOCID = *ociBackup.CompartmentId + b.Status.Type = ociBackup.Type + b.Status.IsAutomatic = *ociBackup.IsAutomatic + + b.Status.LifecycleState = ociBackup.LifecycleState + + b.Status.TimeStarted = dbv4.FormatSDKTime(ociBackup.TimeStarted) + b.Status.TimeEnded = dbv4.FormatSDKTime(ociBackup.TimeEnded) + + b.Status.DBDisplayName = *ociAdb.DisplayName + b.Status.DBName = *ociAdb.DbName +} + +// GetTimeEnded returns the status.timeEnded in SDKTime format +func (b *AutonomousDatabaseBackup) GetTimeEnded() (*common.SDKTime, error) { + return dbv4.ParseDisplayTime(b.Status.TimeEnded) +} diff --git a/apis/database/v1alpha1/autonomousdatabasebackup_webhook.go b/apis/database/v1alpha1/autonomousdatabasebackup_webhook.go new file mode 100644 index 00000000..ffa9b888 --- /dev/null +++ 
b/apis/database/v1alpha1/autonomousdatabasebackup_webhook.go @@ -0,0 +1,158 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabasebackuplog = logf.Log.WithName("autonomousdatabasebackup-resource") + +func (r *AutonomousDatabaseBackup) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,verbs=create;update,versions=v1alpha1,name=mautonomousdatabasebackupv1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &AutonomousDatabaseBackup{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) Default() { + autonomousdatabasebackuplog.Info("default", "name", r.Name) +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabasebackup,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,versions=v1alpha1,name=vautonomousdatabasebackupv1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousDatabaseBackup{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateCreate() (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate create", "name", r.Name) + + var allErrs field.ErrorList + + namespaces := dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + } + + if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.Ocid == nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty")) + } + + if r.Spec.Target.K8sAdb.Name != nil 
&& r.Spec.Target.OciAdb.Ocid != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB or ociADB, but not both")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseBackup"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate update", "name", r.Name) + + var allErrs field.ErrorList + oldBackup := old.(*AutonomousDatabaseBackup) + + if oldBackup.Spec.AutonomousDatabaseBackupOCID != nil && r.Spec.AutonomousDatabaseBackupOCID != nil && + *oldBackup.Spec.AutonomousDatabaseBackupOCID != *r.Spec.AutonomousDatabaseBackupOCID { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("autonomousDatabaseBackupOCID"), + "cannot assign a new autonomousDatabaseBackupOCID to this backup")) + } + + if oldBackup.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.K8sAdb.Name != nil && + *oldBackup.Spec.Target.K8sAdb.Name != *r.Spec.Target.K8sAdb.Name { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target").Child("k8sADB").Child("name"), "cannot assign a new name to the target")) + } + + if oldBackup.Spec.Target.OciAdb.Ocid != nil && r.Spec.Target.OciAdb.Ocid != nil && + *oldBackup.Spec.Target.OciAdb.Ocid != *r.Spec.Target.OciAdb.Ocid { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target").Child("ociADB").Child("ocid"), "cannot assign a new ocid to the target")) + } + + if oldBackup.Spec.DisplayName != nil && r.Spec.DisplayName != nil && + *oldBackup.Spec.DisplayName != *r.Spec.DisplayName { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("displayName"), "cannot assign a new displayName to this backup")) + } 
+ + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseBackup"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateDelete() (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v1alpha1/autonomousdatabasebackup_webhook_test.go b/apis/database/v1alpha1/autonomousdatabasebackup_webhook_test.go new file mode 100644 index 00000000..87eb1618 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabasebackup_webhook_test.go @@ -0,0 +1,172 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the 
foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ +package v1alpha1 + +import ( + "context" + "time" + + "github.com/oracle/oci-go-sdk/v65/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + // +kubebuilder:scaffold:imports +) + +var _ = Describe("test AutonomousDatabaseBackup webhook", func() { + Describe("Test ValidateCreate of the AutonomousDatabaseBackup validating webhook", func() { + var ( + resourceName = "testadbbackup" + namespace = "default" + + backup *AutonomousDatabaseBackup + ) + + BeforeEach(func() { + backup = &AutonomousDatabaseBackup{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabaseBackup", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: namespace, + }, + Spec: AutonomousDatabaseBackupSpec{ + Target: TargetSpec{}, + }, + } + }) + + It("Should specify at least one of the k8sADB and ociADB", func() { + var errMsg string = "target ADB is empty" + + backup.Spec.Target.K8sAdb.Name = nil + backup.Spec.Target.OciAdb.Ocid = nil + + validateInvalidTest(backup, false, errMsg) + }) + + It("Should specify either k8sADB or ociADB, but not both", func() { + var errMsg string = "specify either k8sADB or 
ociADB, but not both" + + backup.Spec.Target.K8sAdb.Name = common.String("fake-target-adb") + backup.Spec.Target.OciAdb.Ocid = common.String("fake.ocid1.autonomousdatabase.oc1...") + + validateInvalidTest(backup, false, errMsg) + }) + }) + + Describe("Test ValidateUpdate of the AutonomousDatabaseBackup validating webhook", func() { + var ( + resourceName = "testadbbackup" + namespace = "default" + backupLookupKey = types.NamespacedName{Name: resourceName, Namespace: namespace} + + backup *AutonomousDatabaseBackup + + timeout = time.Second * 5 + ) + + BeforeEach(func() { + backup = &AutonomousDatabaseBackup{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabaseBackup", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: namespace, + }, + Spec: AutonomousDatabaseBackupSpec{ + AutonomousDatabaseBackupOCID: common.String("fake.ocid1.autonomousdatabasebackup.oc1..."), + DisplayName: common.String("fake-displayName"), + }, + } + }) + + JustBeforeEach(func() { + Expect(k8sClient.Create(context.TODO(), backup)).To(Succeed()) + + // Make sure the object is created + Eventually(func() error { + createdBackup := &AutonomousDatabaseBackup{} + return k8sClient.Get(context.TODO(), backupLookupKey, createdBackup) + }, timeout).Should(BeNil()) + }) + + AfterEach(func() { + Expect(k8sClient.Delete(context.TODO(), backup)).To(Succeed()) + }) + + Context("The backup is using target.k8sAdb.name", func() { + BeforeEach(func() { + backup.Spec.Target.K8sAdb.Name = common.String("fake-target-adb") + }) + + It("Cannot assign a new name to the target", func() { + var errMsg string = "cannot assign a new name to the target" + + backup.Spec.Target.K8sAdb.Name = common.String("modified-target-adb") + + validateInvalidTest(backup, true, errMsg) + }) + + It("Cannot assign a new displayName to this backup", func() { + var errMsg string = "cannot assign a new displayName to this backup" + + backup.Spec.DisplayName = 
common.String("modified-displayName") + + validateInvalidTest(backup, true, errMsg) + }) + }) + + Context("The backup is using target.ociADB.ocid", func() { + BeforeEach(func() { + backup.Spec.Target.OciAdb.Ocid = common.String("fake.ocid1.autonomousdatabase.oc1...") + }) + + It("Cannot assign a new ocid to the target", func() { + var errMsg string = "cannot assign a new ocid to the target" + + backup.Spec.Target.OciAdb.Ocid = common.String("modified.ocid1.autonomousdatabase.oc1...") + + validateInvalidTest(backup, true, errMsg) + }) + }) + }) +}) diff --git a/apis/database/v1alpha1/autonomousdatabaserestore_types.go b/apis/database/v1alpha1/autonomousdatabaserestore_types.go new file mode 100644 index 00000000..ef8698b2 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabaserestore_types.go @@ -0,0 +1,139 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to 
sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+type K8sAdbBackupSpec struct { + Name *string `json:"name,omitempty"` +} + +type PitSpec struct { + // The timestamp must follow this format: YYYY-MM-DD HH:MM:SS GMT + Timestamp *string `json:"timestamp,omitempty"` +} + +type SourceSpec struct { + K8sAdbBackup K8sAdbBackupSpec `json:"k8sADBBackup,omitempty"` + PointInTime PitSpec `json:"pointInTime,omitempty"` +} + +// AutonomousDatabaseRestoreSpec defines the desired state of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Target TargetSpec `json:"target"` + Source SourceSpec `json:"source"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` +} + +// AutonomousDatabaseRestoreStatus defines the observed state of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + DisplayName string `json:"displayName"` + TimeAccepted string `json:"timeAccepted,omitempty"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeEnded string `json:"timeEnded,omitempty"` + DbName string `json:"dbName"` + WorkRequestOCID string `json:"workRequestOCID"` + Status workrequests.WorkRequestStatusEnum `json:"status"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="adbr";"adbrs" +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string +// +kubebuilder:printcolumn:JSONPath=".status.displayName",name="DbDisplayName",type=string +// +kubebuilder:printcolumn:JSONPath=".status.dbName",name="DbName",type=string + +// AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores API +type AutonomousDatabaseRestore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec 
AutonomousDatabaseRestoreSpec `json:"spec,omitempty"` + Status AutonomousDatabaseRestoreStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousDatabaseRestoreList contains a list of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabaseRestore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabaseRestore{}, &AutonomousDatabaseRestoreList{}) +} + +// GetPIT returns the spec.pointInTime.timeStamp in SDKTime format +func (r *AutonomousDatabaseRestore) GetPIT() (*common.SDKTime, error) { + if r.Spec.Source.PointInTime.Timestamp == nil { + return nil, errors.New("the timestamp is empty") + } + return dbv4.ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) +} + +func (r *AutonomousDatabaseRestore) UpdateStatus( + adb database.AutonomousDatabase, + workResp workrequests.GetWorkRequestResponse) { + + r.Status.DisplayName = *adb.DisplayName + r.Status.DbName = *adb.DbName + + r.Status.WorkRequestOCID = *workResp.Id + r.Status.Status = workResp.Status + r.Status.TimeAccepted = dbv4.FormatSDKTime(workResp.TimeAccepted) + r.Status.TimeStarted = dbv4.FormatSDKTime(workResp.TimeStarted) + r.Status.TimeEnded = dbv4.FormatSDKTime(workResp.TimeFinished) +} diff --git a/apis/database/v1alpha1/autonomousdatabaserestore_webhook.go b/apis/database/v1alpha1/autonomousdatabaserestore_webhook.go new file mode 100644 index 00000000..dcd57137 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabaserestore_webhook.go @@ -0,0 +1,147 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabaserestorelog = logf.Log.WithName("autonomousdatabaserestore-resource") + +func (r *AutonomousDatabaseRestore) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabaserestore,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabaserestores,versions=v1alpha1,name=vautonomousdatabaserestorev1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousDatabaseRestore{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateCreate() (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate create", "name", r.Name) + + var allErrs field.ErrorList + + namespaces := dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over 
this namespace")) + } + } + + // Validate the target ADB + if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.Ocid == nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty")) + } + + if r.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.OciAdb.Ocid != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB.name or ociADB.ocid, but not both")) + } + + // Validate the restore source + if r.Spec.Source.K8sAdbBackup.Name == nil && + r.Spec.Source.PointInTime.Timestamp == nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("source"), "retore source is empty")) + } + + if r.Spec.Source.K8sAdbBackup.Name != nil && + r.Spec.Source.PointInTime.Timestamp != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("source"), "cannot apply backupName and the PITR parameters at the same time")) + } + + // Verify the timestamp format if it's PITR + if r.Spec.Source.PointInTime.Timestamp != nil { + _, err := dbv4.ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) + if err != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("source").Child("pointInTime").Child("timestamp"), "invalid timestamp format")) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseRestore"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate update", "name", r.Name) + + var allErrs field.ErrorList + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseRestore"}, + r.Name, 
allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateDelete() (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v1alpha1/autonomousdatabaserestore_webhook_test.go b/apis/database/v1alpha1/autonomousdatabaserestore_webhook_test.go new file mode 100644 index 00000000..0cc9b692 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabaserestore_webhook_test.go @@ -0,0 +1,115 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ +package v1alpha1 + +import ( + "github.com/oracle/oci-go-sdk/v65/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + // +kubebuilder:scaffold:imports +) + +var _ = Describe("test AutonomousDatabaseRestore webhook", func() { + Describe("Test ValidateCreate of the AutonomousDatabaseRestore validating webhook", func() { + var ( + resourceName = "testadbrestore" + namespace = "default" + + restore *AutonomousDatabaseRestore + ) + + BeforeEach(func() { + restore = &AutonomousDatabaseRestore{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabaseRestore", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: namespace, + }, + Spec: AutonomousDatabaseRestoreSpec{ + Target: TargetSpec{}, + }, + } + }) + + It("Should specify at least one of the k8sADB and ociADB", func() { + var errMsg string = "target ADB is empty" + + restore.Spec.Target.K8sAdb.Name = nil + restore.Spec.Target.OciAdb.Ocid = nil + + validateInvalidTest(restore, false, errMsg) + }) + + It("Should specify either k8sADB.name or ociADB.ocid, but not both", func() { + var errMsg string = "specify either k8sADB.name or ociADB.ocid, but not both" + + restore.Spec.Target.K8sAdb.Name = 
common.String("fake-target-adb") + restore.Spec.Target.OciAdb.Ocid = common.String("fake.ocid1.autonomousdatabase.oc1...") + + validateInvalidTest(restore, false, errMsg) + }) + + It("Should select at least one restore source", func() { + var errMsg string = "retore source is empty" + + restore.Spec.Source.K8sAdbBackup.Name = nil + restore.Spec.Source.PointInTime.Timestamp = nil + + validateInvalidTest(restore, false, errMsg) + }) + + It("Cannot apply backupName and the PITR parameters at the same time", func() { + var errMsg string = "cannot apply backupName and the PITR parameters at the same time" + + restore.Spec.Source.K8sAdbBackup.Name = common.String("fake-source-adb-backup") + restore.Spec.Source.PointInTime.Timestamp = common.String("2021-12-23 11:03:13 UTC") + + validateInvalidTest(restore, false, errMsg) + }) + + It("Invalid timestamp format", func() { + var errMsg string = "invalid timestamp format" + + restore.Spec.Source.PointInTime.Timestamp = common.String("12/23/2021 11:03:13") + + validateInvalidTest(restore, false, errMsg) + }) + }) +}) diff --git a/apis/database/v1alpha1/cdb_types.go b/apis/database/v1alpha1/cdb_types.go new file mode 100644 index 00000000..f97df391 --- /dev/null +++ b/apis/database/v1alpha1/cdb_types.go @@ -0,0 +1,190 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CDBSpec defines the desired state of CDB +type CDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // Name of the CDB Service + ServiceName string `json:"serviceName,omitempty"` + + // Password for the CDB System Administrator + SysAdminPwd CDBSysAdminPassword `json:"sysAdminPwd,omitempty"` + // User in the root container with sysdba priviledges to manage PDB lifecycle + CDBAdminUser CDBAdminUser `json:"cdbAdminUser,omitempty"` + // Password for the CDB Administrator to manage PDB lifecycle + CDBAdminPwd CDBAdminPassword `json:"cdbAdminPwd,omitempty"` + + CDBTlsKey CDBTLSKEY `json:"cdbTlsKey,omitempty"` + CDBTlsCrt CDBTLSCRT `json:"cdbTlsCrt,omitempty"` + + // Password for user ORDS_PUBLIC_USER + ORDSPwd ORDSPassword `json:"ordsPwd,omitempty"` + // ORDS server port. For now, keep it as 8888. TO BE USED IN FUTURE RELEASE. + ORDSPort int `json:"ordsPort,omitempty"` + // ORDS Image Name + ORDSImage string `json:"ordsImage,omitempty"` + // The name of the image pull secret in case of a private docker repository. 
+ ORDSImagePullSecret string `json:"ordsImagePullSecret,omitempty"` + // ORDS Image Pull Policy + // +kubebuilder:validation:Enum=Always;Never + ORDSImagePullPolicy string `json:"ordsImagePullPolicy,omitempty"` + // Number of ORDS Containers to create + Replicas int `json:"replicas,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebServerUser WebServerUser `json:"webServerUser,omitempty"` + // Password for the Web Server User + WebServerPwd WebServerPassword `json:"webServerPwd,omitempty"` + // Name of the DB server + DBServer string `json:"dbServer,omitempty"` + // DB server port + DBPort int `json:"dbPort,omitempty"` + // Node Selector for running the Pod + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + DeletePDBCascade bool `json:"deletePdbCascade,omitempty"` + DBTnsurl string `json:"dbTnsurl,omitempty"` + CDBPubKey CDBPUBKEY `json:"cdbOrdsPubKey,omitempty"` + CDBPriKey CDBPRIVKEY `json:"cdbOrdsPrvKey,omitempty"` +} + +// CDBSecret defines the secretName +type CDBSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +// CDBSysAdminPassword defines the secret containing SysAdmin Password mapped to key 'sysAdminPwd' for CDB +type CDBSysAdminPassword struct { + Secret CDBSecret `json:"secret"` +} + +// CDBAdminUser defines the secret containing CDB Administrator User mapped to key 'cdbAdminUser' to manage PDB lifecycle +type CDBAdminUser struct { + Secret CDBSecret `json:"secret"` +} + +// CDBAdminPassword defines the secret containing CDB Administrator Password mapped to key 'cdbAdminPwd' to manage PDB lifecycle +type CDBAdminPassword struct { + Secret CDBSecret `json:"secret"` +} + +// ORDSPassword defines the secret containing ORDS_PUBLIC_USER Password mapped to key 'ordsPwd' +type ORDSPassword struct { + Secret CDBSecret `json:"secret"` +} + +// WebServerUser defines the secret containing Web Server User mapped to key 
'webServerUser' to manage PDB lifecycle +type WebServerUser struct { + Secret CDBSecret `json:"secret"` +} + +// WebServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle +type WebServerPassword struct { + Secret CDBSecret `json:"secret"` +} + +type CDBTLSKEY struct { + Secret CDBSecret `json:"secret"` +} + +type CDBTLSCRT struct { + Secret CDBSecret `json:"secret"` +} + +type CDBPUBKEY struct { + Secret CDBSecret `json:"secret"` +} + +type CDBPRIVKEY struct { + Secret CDBSecret `json:"secret"` +} + +// CDBStatus defines the observed state of CDB +type CDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Phase of the CDB Resource + Phase string `json:"phase"` + // CDB Resource Status + Status bool `json:"status"` + // Message + Msg string `json:"msg,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" +// +kubebuilder:printcolumn:JSONPath=".spec.dbServer",name="DB Server",type="string",description=" Name of the DB Server" +// +kubebuilder:printcolumn:JSONPath=".spec.dbPort",name="DB Port",type="integer",description="DB server port" +// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="Replicas",type="integer",description="Replicas" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the CDB Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".spec.dbTnsurl",name="TNS STRING",type="string",description=" string of the tnsalias" +// +kubebuilder:resource:path=cdbs,scope=Namespaced + +// CDB is the Schema for the cdbs API +type CDB struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CDBSpec `json:"spec,omitempty"` + Status CDBStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// CDBList contains a list of CDB +type CDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CDB `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CDB{}, &CDBList{}) +} diff --git a/apis/database/v1alpha1/cdb_webhook.go b/apis/database/v1alpha1/cdb_webhook.go new file mode 100644 index 00000000..e93e216e --- /dev/null +++ b/apis/database/v1alpha1/cdb_webhook.go @@ -0,0 +1,224 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var cdblog = logf.Log.WithName("cdb-webhook") + +func (r *CDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-cdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=mcdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &CDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *CDB) Default() { + cdblog.Info("Setting default values in CDB spec for : " + r.Name) + + if r.Spec.ORDSPort == 0 { + r.Spec.ORDSPort = 8888 + } + + if r.Spec.Replicas == 0 { + r.Spec.Replicas = 1 + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-cdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=vcdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &CDB{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateCreate() (admission.Warnings, error) { + cdblog.Info("ValidateCreate", "name", r.Name) + + var allErrs field.ErrorList + + if r.Spec.ServiceName == "" && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("serviceName"), "Please specify CDB Service name")) + } + + if reflect.ValueOf(r.Spec.CDBTlsKey).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbTlsKey"), "Please specify CDB Tls key(secret)")) + } + + if reflect.ValueOf(r.Spec.CDBTlsCrt).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbTlsCrt"), "Please specify CDB Tls Certificate(secret)")) + } + + if reflect.ValueOf(r.Spec.CDBPriKey).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("CDBPriKey"), "Please specify CDB CDBPriKey(secret)")) + } + + /*if r.Spec.SCANName == "" { + 
allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("scanName"), "Please specify SCAN Name for CDB")) + }*/ + + if (r.Spec.DBServer == "" && r.Spec.DBTnsurl == "") || (r.Spec.DBServer != "" && r.Spec.DBTnsurl != "") { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbServer"), "Please specify Database Server Name/IP Address or tnsalias string")) + } + + if r.Spec.DBTnsurl != "" && (r.Spec.DBServer != "" || r.Spec.DBPort != 0 || r.Spec.ServiceName != "") { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbServer"), "DBtnsurl is orthogonal to (DBServer,DBport,Services)")) + } + + if r.Spec.DBPort == 0 && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify DB Server Port")) + } + if r.Spec.DBPort < 0 && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port")) + } + if r.Spec.ORDSPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid ORDS Port")) + } + if r.Spec.Replicas < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas")) + } + if r.Spec.ORDSImage == "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsImage"), "Please specify name of ORDS Image to be used")) + } + if reflect.ValueOf(r.Spec.CDBAdminUser).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbAdminUser"), "Please specify user in the root container with sysdba priviledges to manage PDB lifecycle")) + } + if reflect.ValueOf(r.Spec.CDBAdminPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbAdminPwd"), "Please specify password for the CDB Administrator to manage PDB lifecycle")) + } + if 
reflect.ValueOf(r.Spec.ORDSPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPwd"), "Please specify password for user ORDS_PUBLIC_USER")) + } + if reflect.ValueOf(r.Spec.WebServerUser).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("webServerUser"), "Please specify the Web Server User having SQL Administrator role")) + } + if reflect.ValueOf(r.Spec.WebServerPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify password for the Web Server User having SQL Administrator role")) + } + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "CDB"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + cdblog.Info("validate update", "name", r.Name) + + isCDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil + if isCDBMarkedToBeDeleted { + return nil, nil + } + + var allErrs field.ErrorList + + // Check for updation errors + oldCDB, ok := old.(*CDB) + if !ok { + return nil, nil + } + + if r.Spec.DBPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port")) + } + if r.Spec.ORDSPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid ORDS Port")) + } + if r.Spec.Replicas < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas")) + } + if !strings.EqualFold(oldCDB.Spec.ServiceName, r.Spec.ServiceName) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("replicas"), "cannot be changed")) + } + + if len(allErrs) == 0 { + return nil, nil + } + + return nil, 
apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "CDB"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateDelete() (admission.Warnings, error) { + cdblog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v1alpha1/dataguardbroker_conversion.go b/apis/database/v1alpha1/dataguardbroker_conversion.go new file mode 100644 index 00000000..39751a05 --- /dev/null +++ b/apis/database/v1alpha1/dataguardbroker_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *DataguardBroker) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *DataguardBroker) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/dataguardbroker_types.go b/apis/database/v1alpha1/dataguardbroker_types.go new file mode 100644 index 00000000..768d6dd3 --- /dev/null +++ b/apis/database/v1alpha1/dataguardbroker_types.go @@ -0,0 +1,162 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// DataguardBrokerSpec defines the desired state of DataguardBroker +type DataguardBrokerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + PrimaryDatabaseRef string `json:"primaryDatabaseRef"` + StandbyDatabaseRefs []string `json:"standbyDatabaseRefs"` + SetAsPrimaryDatabase string `json:"setAsPrimaryDatabase,omitempty"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + // +kubebuilder:validation:Enum=MaxPerformance;MaxAvailability + ProtectionMode string `json:"protectionMode"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + FastStartFailover bool `json:"fastStartFailover,omitempty"` +} + +// DataguardBrokerStatus defines the observed state of DataguardBroker +type DataguardBrokerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + PrimaryDatabaseRef string `json:"primaryDatabaseRef,omitempty"` + ProtectionMode string `json:"protectionMode,omitempty"` + PrimaryDatabase string `json:"primaryDatabase,omitempty"` + StandbyDatabases string `json:"standbyDatabases,omitempty"` + ExternalConnectString string `json:"externalConnectString,omitempty"` + ClusterConnectString string `json:"clusterConnectString,omitempty"` + Status string `json:"status,omitempty"` + + FastStartFailover string `json:"fastStartFailover,omitempty"` + DatabasesInDataguardConfig map[string]string `json:"databasesInDataguardConfig,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// 
+kubebuilder:printcolumn:JSONPath=".status.primaryDatabase",name="Primary",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.standbyDatabases",name="Standbys",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.protectionMode",name="Protection Mode",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.clusterConnectString",name="Cluster Connect Str",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.externalConnectString",name="Connect Str",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.primaryDatabaseRef",name="Primary Database",type="string", priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.fastStartFailover",name="FSFO", type="string" + +// DataguardBroker is the Schema for the dataguardbrokers API +type DataguardBroker struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataguardBrokerSpec `json:"spec,omitempty"` + Status DataguardBrokerStatus `json:"status,omitempty"` +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns the current primary database in the dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetCurrentPrimaryDatabase() string { + if broker.Status.PrimaryDatabase != "" { + return broker.Status.DatabasesInDataguardConfig[broker.Status.PrimaryDatabase] + } + return broker.Spec.PrimaryDatabaseRef +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns databases in Dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetDatabasesInDataGuardConfiguration() 
[]string { + var databases []string + if len(broker.Status.DatabasesInDataguardConfig) > 0 { + for _, value := range broker.Status.DatabasesInDataguardConfig { + if value != "" { + databases = append(databases, value) + } + } + + return databases + } + + databases = append(databases, broker.Spec.PrimaryDatabaseRef) + databases = append(databases, broker.Spec.StandbyDatabaseRefs...) + return databases +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns standby databases in the dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetStandbyDatabasesInDgConfig() []string { + var databases []string + if len(broker.Status.DatabasesInDataguardConfig) > 0 { + for _, value := range broker.Status.DatabasesInDataguardConfig { + if value != "" && value != broker.Status.PrimaryDatabase { + databases = append(databases, value) + } + } + + return databases + } + + databases = append(databases, broker.Spec.StandbyDatabaseRefs...) + return databases +} + +//+kubebuilder:object:root=true + +// DataguardBrokerList contains a list of DataguardBroker +type DataguardBrokerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataguardBroker `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DataguardBroker{}, &DataguardBrokerList{}) +} diff --git a/apis/database/v1alpha1/dataguardbroker_webhook.go b/apis/database/v1alpha1/dataguardbroker_webhook.go new file mode 100644 index 00000000..89a9d3fd --- /dev/null +++ b/apis/database/v1alpha1/dataguardbroker_webhook.go @@ -0,0 +1,183 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + "strconv" + "strings" + + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var dataguardbrokerlog = logf.Log.WithName("dataguardbroker-resource") + +func (r *DataguardBroker) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-dataguardbroker,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=dataguardbrokers,verbs=create;update,versions=v1alpha1,name=mdataguardbroker.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &DataguardBroker{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DataguardBroker) Default() { + dataguardbrokerlog.Info("default", "name", r.Name) + + if r.Spec.LoadBalancer { + if r.Spec.ServiceAnnotations == nil { + r.Spec.ServiceAnnotations = make(map[string]string) + } + // Annotations required for a flexible load balancer on oci + _, ok := r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape"] + if !ok { + r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape"] = "flexible" + } + _, ok = r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-min"] + if !ok { + r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-min"] = "10" + } + _, ok = 
r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-max"] + if !ok { + r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-max"] = "100" + } + } + + if r.Spec.SetAsPrimaryDatabase != "" { + r.Spec.SetAsPrimaryDatabase = strings.ToUpper(r.Spec.SetAsPrimaryDatabase) + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-dataguardbroker,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=dataguardbrokers,versions=v1alpha1,name=vdataguardbroker.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &DataguardBroker{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DataguardBroker) ValidateCreate() (admission.Warnings, error) { + + dataguardbrokerlog.Info("validate create", "name", r.Name) + var allErrs field.ErrorList + namespaces := dbcommons.GetWatchNamespaces() + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + if len(allErrs) == 0 { + return nil, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "Dataguard"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DataguardBroker) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + dataguardbrokerlog.Info("validate update", "name", r.Name) + + dataguardbrokerlog.Info("validate update", "name", r.Name) + var allErrs field.ErrorList + + // check creation validations first + _, 
err := r.ValidateCreate() + if err != nil { + return nil, err + } + + // Validate Deletion + if r.GetDeletionTimestamp() != nil { + warnings, err := r.ValidateDelete() + if err != nil { + return warnings, err + } + } + + // Now check for updation errors + oldObj, ok := old.(*DataguardBroker) + if !ok { + return nil, nil + } + + if oldObj.Status.ProtectionMode != "" && !strings.EqualFold(r.Spec.ProtectionMode, oldObj.Status.ProtectionMode) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("protectionMode"), "cannot be changed")) + } + if oldObj.Status.PrimaryDatabaseRef != "" && !strings.EqualFold(oldObj.Status.PrimaryDatabaseRef, r.Spec.PrimaryDatabaseRef) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("primaryDatabaseRef"), "cannot be changed")) + } + fastStartFailoverStatus, _ := strconv.ParseBool(oldObj.Status.FastStartFailover) + if (fastStartFailoverStatus || r.Spec.FastStartFailover) && r.Spec.SetAsPrimaryDatabase != "" { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("setAsPrimaryDatabase"), "switchover not supported when fastStartFailover is true")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "DataguardBroker"}, + r.Name, allErrs) + +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DataguardBroker) ValidateDelete() (admission.Warnings, error) { + dataguardbrokerlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} diff --git a/apis/database/v1alpha1/dbcssystem_conversion.go b/apis/database/v1alpha1/dbcssystem_conversion.go new file mode 100644 index 00000000..0aa6a258 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *DbcsSystem) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *DbcsSystem) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/dbcssystem_kms_types.go b/apis/database/v1alpha1/dbcssystem_kms_types.go new file mode 100644 index 00000000..c90726e3 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_kms_types.go @@ -0,0 +1,141 @@ +/* +** Copyright (c) 2022-2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** 
either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ +package v1alpha1 + +import "encoding/json" + +type KMSConfig struct { + VaultName string `json:"vaultName,omitempty"` + CompartmentId string `json:"compartmentId,omitempty"` + KeyName string `json:"keyName,omitempty"` + EncryptionAlgo string `json:"encryptionAlgo,omitempty"` + VaultType string `json:"vaultType,omitempty"` +} +type KMSDetailsStatus struct { + VaultId string `json:"vaultId,omitempty"` + ManagementEndpoint string `json:"managementEndpoint,omitempty"` + KeyId string `json:"keyId,omitempty"` + VaultName string `json:"vaultName,omitempty"` + CompartmentId string `json:"compartmentId,omitempty"` + KeyName string `json:"keyName,omitempty"` + EncryptionAlgo string `json:"encryptionAlgo,omitempty"` + VaultType string `json:"vaultType,omitempty"` +} + +const ( + lastSuccessfulKMSConfig = "lastSuccessfulKMSConfig" + lastSuccessfulKMSStatus = "lastSuccessfulKMSStatus" +) + +// GetLastSuccessfulKMSConfig returns the KMS config from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulKMSConfig. 
+func (dbcs *DbcsSystem) GetLastSuccessfulKMSConfig() (*KMSConfig, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSConfig] + if !ok { + return nil, nil + } + + configBytes := []byte(val) + kmsConfig := KMSConfig{} + + err := json.Unmarshal(configBytes, &kmsConfig) + if err != nil { + return nil, err + } + + return &kmsConfig, nil +} + +// GetLastSuccessfulKMSStatus returns the KMS status from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulKMSStatus. +func (dbcs *DbcsSystem) GetLastSuccessfulKMSStatus() (*KMSDetailsStatus, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSStatus] + if !ok { + return nil, nil + } + + statusBytes := []byte(val) + kmsStatus := KMSDetailsStatus{} + + err := json.Unmarshal(statusBytes, &kmsStatus) + if err != nil { + return nil, err + } + + return &kmsStatus, nil +} + +// SetLastSuccessfulKMSConfig saves the given KMSConfig to the annotations. +func (dbcs *DbcsSystem) SetLastSuccessfulKMSConfig(kmsConfig *KMSConfig) error { + configBytes, err := json.Marshal(kmsConfig) + if err != nil { + return err + } + + annotations := dbcs.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[lastSuccessfulKMSConfig] = string(configBytes) + dbcs.SetAnnotations(annotations) + return nil +} + +// SetLastSuccessfulKMSStatus saves the given KMSDetailsStatus to the annotations. 
+func (dbcs *DbcsSystem) SetLastSuccessfulKMSStatus(kmsStatus *KMSDetailsStatus) error { + statusBytes, err := json.Marshal(kmsStatus) + if err != nil { + return err + } + + annotations := dbcs.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[lastSuccessfulKMSStatus] = string(statusBytes) + dbcs.SetAnnotations(annotations) + // Update KMSDetailsStatus in DbcsSystemStatus + dbcs.Status.KMSDetailsStatus = KMSDetailsStatus{ + VaultName: kmsStatus.VaultName, + CompartmentId: kmsStatus.CompartmentId, + KeyName: kmsStatus.KeyName, + EncryptionAlgo: kmsStatus.EncryptionAlgo, + VaultType: kmsStatus.VaultType, + } + return nil +} diff --git a/apis/database/v1alpha1/dbcssystem_pdbconfig_types.go b/apis/database/v1alpha1/dbcssystem_pdbconfig_types.go new file mode 100644 index 00000000..1b745e09 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_pdbconfig_types.go @@ -0,0 +1,83 @@ +/* +** Copyright (c) 2022-2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, 
sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ +package v1alpha1 + +// PDBConfig defines details of PDB struct for DBCS systems +type PDBConfig struct { + // The name for the pluggable database (PDB). The name is unique in the context of a Database. The name must begin with an alphabetic character and can contain a maximum of thirty alphanumeric characters. Special characters are not permitted. The pluggable database name should not be same as the container database name. + PdbName *string `mandatory:"true" json:"pdbName"` + + // The OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the CDB + // ContainerDatabaseId *string `mandatory:"false" json:"containerDatabaseId"` + + // // A strong password for PDB Admin. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, \#, or -. + PdbAdminPassword *string `mandatory:"false" json:"pdbAdminPassword"` + + // // The existing TDE wallet password of the CDB. 
+ TdeWalletPassword *string `mandatory:"false" json:"tdeWalletPassword"` + + // // The locked mode of the pluggable database admin account. If false, the user needs to provide the PDB Admin Password to connect to it. + // // If true, the pluggable database will be locked and user cannot login to it. + ShouldPdbAdminAccountBeLocked *bool `mandatory:"false" json:"shouldPdbAdminAccountBeLocked"` + + // // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. + // // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). + // // Example: `{"Department": "Finance"}` + FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` + + // // Defined tags for this resource. Each key is predefined and scoped to a namespace. + // // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). + // DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` + + // To specify whether to delete the PDB + IsDelete *bool `mandatory:"false" json:"isDelete,omitempty"` + + // The OCID of the PDB for deletion purposes. 
+ PluggableDatabaseId *string `mandatory:"false" json:"pluggableDatabaseId,omitempty"` +} + +type PDBConfigStatus struct { + PdbName *string `mandatory:"true" json:"pdbName"` + ShouldPdbAdminAccountBeLocked *bool `mandatory:"false" json:"shouldPdbAdminAccountBeLocked"` + FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` + PluggableDatabaseId *string `mandatory:"false" json:"pluggableDatabaseId,omitempty"` + PdbLifecycleState LifecycleState `json:"pdbState,omitempty"` +} +type PDBDetailsStatus struct { + PDBConfigStatus []PDBConfigStatus `json:"pdbConfigStatus,omitempty"` +} diff --git a/apis/database/v1alpha1/dbcssystem_types.go b/apis/database/v1alpha1/dbcssystem_types.go new file mode 100644 index 00000000..d49fde8c --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_types.go @@ -0,0 +1,290 @@ +/* +** Copyright (c) 2022-2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to 
sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ +package v1alpha1 + +import ( + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/go-logr/logr" + dbcsv1 "github.com/oracle/oracle-database-operator/commons/annotations" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// DbcsSystemSpec defines the desired state of DbcsSystem +type DbcsSystemSpec struct { + DbSystem DbSystemDetails `json:"dbSystem,omitempty"` + Id *string `json:"id,omitempty"` + OCIConfigMap *string `json:"ociConfigMap"` + OCISecret *string `json:"ociSecret,omitempty"` + DbClone *DbCloneConfig `json:"dbClone,omitempty"` + HardLink bool `json:"hardLink,omitempty"` + PdbConfigs []PDBConfig `json:"pdbConfigs,omitempty"` + SetupDBCloning bool `json:"setupDBCloning,omitempty"` + DbBackupId *string `json:"dbBackupId,omitempty"` + DatabaseId *string `json:"databaseId,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` +} + +// DbSystemDetails Spec + +type DbSystemDetails struct { + CompartmentId string `json:"compartmentId"` + AvailabilityDomain string `json:"availabilityDomain"` + SubnetId string `json:"subnetId"` + Shape string `json:"shape"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + HostName string `json:"hostName"` + CpuCoreCount int `json:"cpuCoreCount,omitempty"` + FaultDomains []string `json:"faultDomains,omitempty"` + DisplayName string `json:"displayName,omitempty"` + BackupSubnetId string `json:"backupSubnetId,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + NodeCount *int `json:"nodeCount,omitempty"` + PrivateIp string `json:"privateIp,omitempty"` + Domain string `json:"domain,omitempty"` + InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"` + ClusterName string `json:"clusterName,omitempty"` + DbAdminPaswordSecret string `json:"dbAdminPaswordSecret"` + DbName string `json:"dbName,omitempty"` + PdbName string `json:"pdbName,omitempty"` + DbDomain string `json:"dbDomain,omitempty"` + DbUniqueName string `json:"dbUniqueName,omitempty"` + StorageManagement string `json:"storageManagement,omitempty"` + DbVersion string `json:"dbVersion,omitempty"` + DbEdition string `json:"dbEdition,omitempty"` + DiskRedundancy string `json:"diskRedundancy,omitempty"` + DbWorkload string 
`json:"dbWorkload,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + DbBackupConfig Backupconfig `json:"dbBackupConfig,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` +} + +// DB Backup Config Network Struct +type Backupconfig struct { + AutoBackupEnabled *bool `json:"autoBackupEnabled,omitempty"` + RecoveryWindowsInDays *int `json:"recoveryWindowsInDays,omitempty"` + AutoBackupWindow *string `json:"autoBackupWindow,omitempty"` + BackupDestinationDetails *string `json:"backupDestinationDetails,omitempty"` +} + +// DbcsSystemStatus defines the observed state of DbcsSystem +type DbcsSystemStatus struct { + Id *string `json:"id,omitempty"` + DisplayName string `json:"displayName,omitempty"` + AvailabilityDomain string `json:"availabilityDomain,omitempty"` + SubnetId string `json:"subnetId,omitempty"` + StorageManagement string `json:"storageManagement,omitempty"` + NodeCount int `json:"nodeCount,omitempty"` + CpuCoreCount int `json:"cpuCoreCount,omitempty"` + + DbEdition string `json:"dbEdition,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + DataStoragePercentage *int `json:"dataStoragePercentage,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + DataStorageSizeInGBs *int `json:"dataStorageSizeInGBs,omitempty"` + RecoStorageSizeInGB *int `json:"recoStorageSizeInGB,omitempty"` + + Shape *string `json:"shape,omitempty"` + State LifecycleState `json:"state"` + DbInfo []DbStatus `json:"dbInfo,omitempty"` + Network VmNetworkDetails `json:"network,omitempty"` + WorkRequests []DbWorkrequests `json:"workRequests,omitempty"` + KMSDetailsStatus KMSDetailsStatus `json:"kmsDetailsStatus,omitempty"` + DbCloneStatus DbCloneStatus `json:"dbCloneStatus,omitempty"` + PdbDetailsStatus []PDBDetailsStatus `json:"pdbDetailsStatus,omitempty"` +} + +// DbcsSystemStatus defines the observed state of 
DbcsSystem +type DbStatus struct { + Id *string `json:"id,omitempty"` + DbName string `json:"dbName,omitempty"` + DbUniqueName string `json:"dbUniqueName,omitempty"` + DbWorkload string `json:"dbWorkload,omitempty"` + DbHomeId string `json:"dbHomeId,omitempty"` +} + +type DbWorkrequests struct { + OperationType *string `json:"operationType,omitmpty"` + OperationId *string `json:"operationId,omitemty"` + PercentComplete string `json:"percentComplete,omitempty"` + TimeAccepted string `json:"timeAccepted,omitempty"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeFinished string `json:"timeFinished,omitempty"` +} + +type VmNetworkDetails struct { + VcnName *string `json:"vcnName,omitempty"` + SubnetName *string `json:"clientSubnet,omitempty"` + ScanDnsName *string `json:"scanDnsName,omitempty"` + HostName string `json:"hostName,omitempty"` + DomainName string `json:"domainName,omitempty"` + ListenerPort *int `json:"listenerPort,omitempty"` + NetworkSG string `json:"networkSG,omitempty"` +} + +// DbCloneConfig defines the configuration for the database clone +type DbCloneConfig struct { + DbAdminPaswordSecret string `json:"dbAdminPaswordSecret,omitempty"` + TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"` + DbName string `json:"dbName"` + HostName string `json:"hostName"` + DbUniqueName string `json:"dbDbUniqueName"` + DisplayName string `json:"displayName"` + LicenseModel string `json:"licenseModel,omitempty"` + Domain string `json:"domain,omitempty"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + SubnetId string `json:"subnetId"` + SidPrefix string `json:"sidPrefix,omitempty"` + InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"` + KmsKeyId string `json:"kmsKeyId,omitempty"` + KmsKeyVersionId string `json:"kmsKeyVersionId,omitempty"` + PrivateIp string `json:"privateIp,omitempty"` +} + +// DbCloneStatus defines the observed state of DbClone +type DbCloneStatus struct { + Id *string 
`json:"id,omitempty"` + DbAdminPaswordSecret string `json:"dbAdminPaswordSecret,omitempty"` + DbName string `json:"dbName,omitempty"` + HostName string `json:"hostName"` + DbUniqueName string `json:"dbDbUniqueName"` + DisplayName string `json:"displayName,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + Domain string `json:"domain,omitempty"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + SubnetId string `json:"subnetId,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=dbcssystems,scope=Namespaced + +// DbcsSystem is the Schema for the dbcssystems API +type DbcsSystem struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DbcsSystemSpec `json:"spec,omitempty"` + Status DbcsSystemStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DbcsSystemList contains a list of DbcsSystem +type DbcsSystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DbcsSystem `json:"items"` +} + +type LifecycleState string + +const ( + Available LifecycleState = "AVAILABLE" + Failed LifecycleState = "FAILED" + Update LifecycleState = "UPDATING" + Provision LifecycleState = "PROVISIONING" + Terminate LifecycleState = "TERMINATED" +) + +const lastSuccessfulSpec = "lastSuccessfulSpec" + +// GetLastSuccessfulSpec returns spec from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulSpec. 
+func (dbcs *DbcsSystem) GetLastSuccessfulSpec() (*DbcsSystemSpec, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulSpec] + if !ok { + return nil, nil + } + + specBytes := []byte(val) + sucSpec := DbcsSystemSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + return nil, err + } + + return &sucSpec, nil +} +func (dbcs *DbcsSystem) GetLastSuccessfulSpecWithLog(log logr.Logger) (*DbcsSystemSpec, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulSpec] + if !ok { + log.Info("No last successful spec annotation found") + return nil, nil + } + + specBytes := []byte(val) + sucSpec := DbcsSystemSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + log.Error(err, "Failed to unmarshal last successful spec") + return nil, err + } + + log.Info("Successfully retrieved last successful spec", "spec", sucSpec) + return &sucSpec, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. +func (dbcs *DbcsSystem) UpdateLastSuccessfulSpec(kubeClient client.Client) error { + specBytes, err := json.Marshal(dbcs.Spec) + if err != nil { + return err + } + + anns := map[string]string{ + lastSuccessfulSpec: string(specBytes), + } + + // return dbcsv1.SetAnnotations(kubeClient, dbcs, anns) + return dbcsv1.PatchAnnotations(kubeClient, dbcs, anns) + +} + +func init() { + SchemeBuilder.Register(&DbcsSystem{}, &DbcsSystemList{}) +} diff --git a/apis/database/v1alpha1/dbcssystem_webhook.go b/apis/database/v1alpha1/dbcssystem_webhook.go new file mode 100644 index 00000000..dc9f8934 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_webhook.go @@ -0,0 +1,98 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var dbcssystemlog = logf.Log.WithName("dbcssystem-resource") + +func (r *DbcsSystem) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-dbcssystem,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=dbcssystems,verbs=create;update,versions=v4,name=mdbcssystemv1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DbcsSystem{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DbcsSystem) Default() { + dbcssystemlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. + +// +kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v4-dbcssystem,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=dbcssystems,versions=v4,name=vdbcssystemv1alpha1.kb.io,admissionReviewVersions=v1 +var _ webhook.Validator = &DbcsSystem{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateCreate() (admission.Warnings, error) { + dbcssystemlog.Info("validate create", "name", r.Name) + + // // TODO(user): fill in your validation logic upon object creation. 
+	return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + dbcssystemlog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateDelete() (admission.Warnings, error) { + dbcssystemlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v1alpha1/groupversion_info.go b/apis/database/v1alpha1/groupversion_info.go index 60029108..3c4b1804 100644 --- a/apis/database/v1alpha1/groupversion_info.go +++ b/apis/database/v1alpha1/groupversion_info.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -37,8 +37,8 @@ */ // Package v1alpha1 contains API Schema definitions for the database v1alpha1 API group -//+kubebuilder:object:generate=true -//+groupName=database.oracle.com +// +kubebuilder:object:generate=true +// +groupName=database.oracle.com package v1alpha1 import ( diff --git a/apis/database/v1alpha1/oraclerestdataservice_conversion.go b/apis/database/v1alpha1/oraclerestdataservice_conversion.go new file mode 100644 index 00000000..a16e1ff6 --- /dev/null +++ b/apis/database/v1alpha1/oraclerestdataservice_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *OracleRestDataService) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *OracleRestDataService) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/oraclerestdataservice_types.go b/apis/database/v1alpha1/oraclerestdataservice_types.go new file mode 100644 index 00000000..bab04092 --- /dev/null +++ b/apis/database/v1alpha1/oraclerestdataservice_types.go @@ -0,0 +1,157 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// OracleRestDataServiceSpec defines the desired state of OracleRestDataService +type OracleRestDataServiceSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + DatabaseRef string `json:"databaseRef"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Image OracleRestDataServiceImage `json:"image,omitempty"` + OrdsPassword OracleRestDataServicePassword `json:"ordsPassword"` + AdminPassword OracleRestDataServicePassword `json:"adminPassword"` + OrdsUser string `json:"ordsUser,omitempty"` + RestEnableSchemas []OracleRestDataServiceRestEnableSchemas `json:"restEnableSchemas,omitempty"` + OracleService string `json:"oracleService,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + Persistence OracleRestDataServicePersistence `json:"persistence,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` + + // +k8s:openapi-gen=true + // +kubebuilder:validation:Minimum=1 + Replicas int `json:"replicas,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` +} + +// OracleRestDataServicePersistence defines the storage related params +type OracleRestDataServicePersistence struct { + Size string `json:"size,omitempty"` + StorageClass string `json:"storageClass,omitempty"` + + // +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany + AccessMode string `json:"accessMode,omitempty"` + VolumeName string `json:"volumeName,omitempty"` + SetWritePermissions *bool `json:"setWritePermissions,omitempty"` +} + +// 
OracleRestDataServiceImage defines the Image source and pullSecrets for POD +type OracleRestDataServiceImage struct { + Version string `json:"version,omitempty"` + PullFrom string `json:"pullFrom"` + PullSecrets string `json:"pullSecrets,omitempty"` +} + +// OracleRestDataServicePassword defines the secret containing Password mapped to secretKey +type OracleRestDataServicePassword struct { + SecretName string `json:"secretName"` + // +kubebuilder:default:="oracle_pwd" + SecretKey string `json:"secretKey,omitempty"` + KeepSecret *bool `json:"keepSecret,omitempty"` +} + +// OracleRestDataServicePDBSchemas defines the PDB Schemas to be ORDS Enabled +type OracleRestDataServiceRestEnableSchemas struct { + PdbName string `json:"pdbName,omitempty"` + SchemaName string `json:"schemaName"` + UrlMapping string `json:"urlMapping,omitempty"` + Enable bool `json:"enable"` +} + +// OracleRestDataServiceStatus defines the observed state of OracleRestDataService +type OracleRestDataServiceStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + Status string `json:"status,omitempty"` + DatabaseApiUrl string `json:"databaseApiUrl,omitempty"` + LoadBalancer string `json:"loadBalancer,omitempty"` + DatabaseRef string `json:"databaseRef,omitempty"` + ServiceIP string `json:"serviceIP,omitempty"` + DatabaseActionsUrl string `json:"databaseActionsUrl,omitempty"` + MongoDbApiAccessUrl string `json:"mongoDbApiAccessUrl,omitempty"` + OrdsInstalled bool `json:"ordsInstalled,omitempty"` + ApexConfigured bool `json:"apexConfigured,omitempty"` + ApxeUrl string `json:"apexUrl,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` + CommonUsersCreated bool `json:"commonUsersCreated,omitempty"` + Replicas int `json:"replicas,omitempty"` + + Image OracleRestDataServiceImage `json:"image,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// 
+kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.databaseRef",name="Database",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.databaseApiUrl",name="Database API URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.databaseActionsUrl",name="Database Actions URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.apexUrl",name="Apex URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.mongoDbApiAccessUrl",name="MongoDbApi Access URL",type="string" + +// OracleRestDataService is the Schema for the oraclerestdataservices API +type OracleRestDataService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OracleRestDataServiceSpec `json:"spec,omitempty"` + Status OracleRestDataServiceStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// OracleRestDataServiceList contains a list of OracleRestDataService +type OracleRestDataServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OracleRestDataService `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OracleRestDataService{}, &OracleRestDataServiceList{}) +} diff --git a/apis/database/v1alpha1/oraclerestdataservice_webhook.go b/apis/database/v1alpha1/oraclerestdataservice_webhook.go new file mode 100644 index 00000000..c5ecde1c --- /dev/null +++ b/apis/database/v1alpha1/oraclerestdataservice_webhook.go @@ -0,0 +1,181 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var oraclerestdataservicelog = logf.Log.WithName("oraclerestdataservice-resource") + +func (r *OracleRestDataService) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-oraclerestdataservice,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=oraclerestdataservices,verbs=create;update,versions=v1alpha1,name=moraclerestdataservice.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &OracleRestDataService{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *OracleRestDataService) Default() { + oraclerestdataservicelog.Info("default", "name", r.Name) + // OracleRestDataService Currently supports single replica + r.Spec.Replicas = 1 + keepSecret := true + if r.Spec.OrdsPassword.KeepSecret == nil { + r.Spec.OrdsPassword.KeepSecret = &keepSecret + } + if r.Spec.AdminPassword.KeepSecret == nil { + r.Spec.AdminPassword.KeepSecret = &keepSecret + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-oraclerestdataservice,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=oraclerestdataservices,versions=v1alpha1,name=voraclerestdataservice.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &OracleRestDataService{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *OracleRestDataService) ValidateCreate() (admission.Warnings, error) { + oraclerestdataservicelog.Info("validate create", "name", r.Name) + + var allErrs field.ErrorList + + namespaces := dbcommons.GetWatchNamespaces() + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + // Persistence spec validation + if r.Spec.Persistence.Size == "" && (r.Spec.Persistence.AccessMode != "" || + r.Spec.Persistence.StorageClass != "" || r.Spec.Persistence.VolumeName != "") { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("size"), r.Spec.Persistence, + "invalid persistence specification, specify required size")) + } + + if r.Spec.Persistence.Size != "" { + if r.Spec.Persistence.AccessMode == "" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("size"), r.Spec.Persistence, + "invalid persistence specification, specify accessMode")) + } + if r.Spec.Persistence.AccessMode != "ReadWriteMany" && r.Spec.Persistence.AccessMode != "ReadWriteOnce" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("accessMode"), + r.Spec.Persistence.AccessMode, "should be either \"ReadWriteOnce\" or \"ReadWriteMany\"")) + } + 
 } + + // Validating databaseRef and ORDS kind name not to be same + if r.Spec.DatabaseRef == r.Name { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("Name"), + "cannot be same as DatabaseRef: "+r.Spec.DatabaseRef)) + + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "OracleRestDataService"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *OracleRestDataService) ValidateUpdate(oldRuntimeObject runtime.Object) (admission.Warnings, error) { + oraclerestdataservicelog.Info("validate update", "name", r.Name) + + var allErrs field.ErrorList + + // check creation validations first + warnings, err := r.ValidateCreate() + if err != nil { + return warnings, err + } + + // Now check for update errors + old, ok := oldRuntimeObject.(*OracleRestDataService) + if !ok { + return nil, nil + } + + if old.Status.DatabaseRef != "" && old.Status.DatabaseRef != r.Spec.DatabaseRef { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("databaseRef"), "cannot be changed")) + } + if old.Status.Image.PullFrom != "" && old.Status.Image != r.Spec.Image { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("image"), "cannot be changed")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "OracleRestDataService"}, + r.Name, allErrs) + +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *OracleRestDataService) ValidateDelete() (admission.Warnings, error) { + oraclerestdataservicelog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} diff --git a/apis/database/v1alpha1/pdb_types.go b/apis/database/v1alpha1/pdb_types.go new file mode 100644 index 00000000..8b966c38 --- /dev/null +++ b/apis/database/v1alpha1/pdb_types.go @@ -0,0 +1,236 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PDBSpec defines the desired state of PDB +type PDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + PDBTlsKey PDBTLSKEY `json:"pdbTlsKey,omitempty"` + PDBTlsCrt PDBTLSCRT `json:"pdbTlsCrt,omitempty"` + PDBTlsCat PDBTLSCAT `json:"pdbTlsCat,omitempty"` + + // CDB Namespace + CDBNamespace string `json:"cdbNamespace,omitempty"` + // Name of the CDB Custom Resource that runs the ORDS container + CDBResName string `json:"cdbResName,omitempty"` + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // The name of the new PDB. Relevant for both Create and Plug Actions. + PDBName string `json:"pdbName,omitempty"` + // Name of the Source PDB from which to clone + SrcPDBName string `json:"srcPdbName,omitempty"` + // The administrator username for the new PDB. This property is required when the Action property is Create. + AdminName PDBAdminName `json:"adminName,omitempty"` + // The administrator password for the new PDB. This property is required when the Action property is Create. + AdminPwd PDBAdminPassword `json:"adminPwd,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebServerUsr WebServerUserPDB `json:"webServerUser,omitempty"` + // Password for the Web ServerPDB User + WebServerPwd WebServerPasswordPDB `json:"webServerPwd,omitempty"` + // Relevant for Create and Plug operations. As defined in the Oracle Multitenant Database documentation. Values can be a filename convert pattern or NONE. 
+ FileNameConversions string `json:"fileNameConversions,omitempty"` + // This property is required when the Action property is Plug. As defined in the Oracle Multitenant Database documentation. Values can be a source filename convert pattern or NONE. + SourceFileNameConversions string `json:"sourceFileNameConversions,omitempty"` + // XML metadata filename to be used for Plug or Unplug operations + XMLFileName string `json:"xmlFileName,omitempty"` + // To copy files or not while cloning a PDB + // +kubebuilder:validation:Enum=COPY;NOCOPY;MOVE + CopyAction string `json:"copyAction,omitempty"` + // Specify if datafiles should be removed or not. The value can be INCLUDING or KEEP (default). + // +kubebuilder:validation:Enum=INCLUDING;KEEP + DropAction string `json:"dropAction,omitempty"` + // A Path specified for sparse clone snapshot copy. (Optional) + SparseClonePath string `json:"sparseClonePath,omitempty"` + // Whether to reuse temp file + ReuseTempFile *bool `json:"reuseTempFile,omitempty"` + // Relevant for Create and Plug operations. True for unlimited storage. Even when set to true, totalSize and tempSize MUST be specified in the request if Action is Create. + UnlimitedStorage *bool `json:"unlimitedStorage,omitempty"` + // Indicate if 'AS CLONE' option should be used in the command to plug in a PDB. This property is applicable when the Action property is PLUG but not required. + AsClone *bool `json:"asClone,omitempty"` + // Relevant for create and plug operations. Total size as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TotalSize string `json:"totalSize,omitempty"` + // Relevant for Create and Clone operations. Total size for temporary tablespace as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. 
+ TempSize string `json:"tempSize,omitempty"` + // TDE import for plug operations + TDEImport *bool `json:"tdeImport,omitempty"` + // TDE export for unplug operations + TDEExport *bool `json:"tdeExport,omitempty"` + // TDE password if the tdeImport or tdeExport flag is set to true. Can be used in create, plug or unplug operations + TDEPassword TDEPwd `json:"tdePassword,omitempty"` + // TDE keystore path is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. + TDEKeystorePath string `json:"tdeKeystorePath,omitempty"` + // TDE secret is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. + TDESecret TDESecret `json:"tdeSecret,omitempty"` + // Whether you need the script only or execute the script + GetScript *bool `json:"getScript,omitempty"` + // Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map. Map is used to map a Databse PDB to a Kubernetes PDB CR. + // +kubebuilder:validation:Enum=Create;Clone;Plug;Unplug;Delete;Modify;Status;Map + Action string `json:"action"` + // Extra options for opening and closing a PDB + // +kubebuilder:validation:Enum=IMMEDIATE;NORMAL;READ ONLY;READ WRITE;RESTRICTED + ModifyOption string `json:"modifyOption,omitempty"` + // The target state of the PDB + // +kubebuilder:validation:Enum=OPEN;CLOSE + PDBState string `json:"pdbState,omitempty"` + // turn on the assertive approach to delete pdb resource + // kubectl delete pdb ..... 
automatically triggers the pluggable database + // deletion + AssertivePdbDeletion bool `json:"assertivePdbDeletion,omitempty"` + PDBPubKey PDBPUBKEY `json:"pdbOrdsPubKey,omitempty"` + PDBPriKey PDBPRIVKEY `json:"pdbOrdsPrvKey,omitempty"` +} + +// PDBAdminName defines the secret containing Sys Admin User mapped to key 'adminName' for PDB +type PDBAdminName struct { + Secret PDBSecret `json:"secret"` +} + +// PDBAdminPassword defines the secret containing Sys Admin Password mapped to key 'adminPwd' for PDB +type PDBAdminPassword struct { + Secret PDBSecret `json:"secret"` +} + +// TDEPwd defines the secret containing TDE Wallet Password mapped to key 'tdePassword' for PDB +type TDEPwd struct { + Secret PDBSecret `json:"secret"` +} + +// TDESecret defines the secret containing TDE Secret to key 'tdeSecret' for PDB +type TDESecret struct { + Secret PDBSecret `json:"secret"` +} + +// WebServerUser defines the secret containing Web Server User mapped to key 'webServerUser' to manage PDB lifecycle + +type WebServerUserPDB struct { + Secret PDBSecret `json:"secret"` +} + +// WebServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle +type WebServerPasswordPDB struct { + Secret PDBSecret `json:"secret"` +} + +// PDBSecret defines the secretName +type PDBSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +type PDBTLSKEY struct { + Secret PDBSecret `json:"secret"` +} + +type PDBTLSCRT struct { + Secret PDBSecret `json:"secret"` +} + +type PDBTLSCAT struct { + Secret PDBSecret `json:"secret"` +} + +// PDBStatus defines the observed state of PDB +type PDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // PDB Connect String + ConnString string `json:"connString,omitempty"` + // Phase of the PDB Resource + Phase string `json:"phase"` + // PDB Resource Status + 
Status bool `json:"status"` + // Total size of the PDB + TotalSize string `json:"totalSize,omitempty"` + // Open mode of the PDB + OpenMode string `json:"openMode,omitempty"` + // Modify Option of the PDB + ModifyOption string `json:"modifyOption,omitempty"` + // Message + Msg string `json:"msg,omitempty"` + // Last Completed Action + Action string `json:"action,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" +// +kubebuilder:printcolumn:JSONPath=".spec.pdbName",name="PDB Name",type="string",description="Name of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.openMode",name="PDB State",type="string",description="PDB Open Mode" +// +kubebuilder:printcolumn:JSONPath=".status.totalSize",name="PDB Size",type="string",description="Total Size of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the PDB Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".status.connString",name="Connect_String",type="string",description="The connect string to be used" +// +kubebuilder:resource:path=pdbs,scope=Namespaced + +// PDB is the Schema for the pdbs API +type PDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PDBSpec `json:"spec,omitempty"` + Status PDBStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// PDBList contains a list of PDB +type PDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PDB `json:"items"` +} + +type PDBPUBKEY struct { + Secret PDBSecret `json:"secret"` +} + +type PDBPRIVKEY struct { + Secret PDBSecret `json:"secret"` +} + +func init() { + SchemeBuilder.Register(&PDB{}, &PDBList{}) +} diff --git 
a/apis/database/v1alpha1/pdb_webhook.go b/apis/database/v1alpha1/pdb_webhook.go new file mode 100644 index 00000000..1f115c9b --- /dev/null +++ b/apis/database/v1alpha1/pdb_webhook.go @@ -0,0 +1,369 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +/* MODIFIED (MM/DD/YY) +** rcitton 07/14/22 - 33822886 + */ + +package v1alpha1 + +import ( + "reflect" + "strconv" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var pdblog = logf.Log.WithName("pdb-webhook") + +func (r *PDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-pdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=mpdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &PDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *PDB) Default() { + pdblog.Info("Setting default values in PDB spec for : " + r.Name) + + action := strings.ToUpper(r.Spec.Action) + + if action == "DELETE" { + if r.Spec.DropAction == "" { + r.Spec.DropAction = "INCLUDING" + pdblog.Info(" - dropAction : INCLUDING") + } + } else if action != "MODIFY" && action != "STATUS" { + if r.Spec.ReuseTempFile == nil { + r.Spec.ReuseTempFile = new(bool) + *r.Spec.ReuseTempFile = true + pdblog.Info(" - reuseTempFile : " + strconv.FormatBool(*(r.Spec.ReuseTempFile))) + } + if r.Spec.UnlimitedStorage == nil { + r.Spec.UnlimitedStorage = new(bool) + 
*r.Spec.UnlimitedStorage = true + pdblog.Info(" - unlimitedStorage : " + strconv.FormatBool(*(r.Spec.UnlimitedStorage))) + } + if r.Spec.TDEImport == nil { + r.Spec.TDEImport = new(bool) + *r.Spec.TDEImport = false + pdblog.Info(" - tdeImport : " + strconv.FormatBool(*(r.Spec.TDEImport))) + } + if r.Spec.TDEExport == nil { + r.Spec.TDEExport = new(bool) + *r.Spec.TDEExport = false + pdblog.Info(" - tdeExport : " + strconv.FormatBool(*(r.Spec.TDEExport))) + } + if r.Spec.AsClone == nil { + r.Spec.AsClone = new(bool) + *r.Spec.AsClone = false + pdblog.Info(" - asClone : " + strconv.FormatBool(*(r.Spec.AsClone))) + } + + } + + if r.Spec.GetScript == nil { + r.Spec.GetScript = new(bool) + *r.Spec.GetScript = false + pdblog.Info(" - getScript : " + strconv.FormatBool(*(r.Spec.GetScript))) + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-pdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=vpdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &PDB{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *PDB) ValidateCreate() (admission.Warnings, error) { + pdblog.Info("ValidateCreate-Validating PDB spec for : " + r.Name) + + var allErrs field.ErrorList + + r.validateCommon(&allErrs) + + r.validateAction(&allErrs) + + action := strings.ToUpper(r.Spec.Action) + + if len(allErrs) == 0 { + pdblog.Info("PDB Resource : " + r.Name + " successfully validated for Action : " + action) + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "PDB"}, + r.Name, allErrs) +} + +// Validate Action for required parameters +func (r *PDB) validateAction(allErrs *field.ErrorList) { + action := strings.ToUpper(r.Spec.Action) + + pdblog.Info("Valdiating PDB 
Resource Action : " + action) + + if reflect.ValueOf(r.Spec.PDBTlsKey).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbTlsKey"), "Please specify PDB Tls Key(secret)")) + } + + if reflect.ValueOf(r.Spec.PDBTlsCrt).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbTlsCrt"), "Please specify PDB Tls Certificate(secret)")) + } + + if reflect.ValueOf(r.Spec.PDBTlsCat).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbTlsCat"), "Please specify PDB Tls Certificate Authority(secret)")) + } + if reflect.ValueOf(r.Spec.PDBPriKey).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbOrdsPrvKey"), "Please specify PDB Tls Certificate Authority(secret)")) + } + + switch action { + case "DELETE": + /* BUG 36752336 - LREST OPERATOR - DELETE NON-EXISTENT PDB SHOWS LRPDB CREATED MESSAGE */ + if r.Status.OpenMode == "READ WRITE" { + pdblog.Info("Cannot delete: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + r.CheckObjExistence("DELETE", allErrs, r) + case "CREATE": + if reflect.ValueOf(r.Spec.AdminName).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("adminName"), "Please specify PDB System Administrator user")) + } + if reflect.ValueOf(r.Spec.AdminPwd).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("adminPwd"), "Please specify PDB System Administrator Password")) + } + if reflect.ValueOf(r.Spec.WebServerUsr).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("WebServerUser"), "Please specify the http webServerUser")) + } + if reflect.ValueOf(r.Spec.WebServerPwd).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify the http 
webserverPassword")) + } + + if r.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. Values can be a filename convert pattern or NONE")) + } + if r.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if r.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + if *(r.Spec.TDEImport) { + r.validateTDEInfo(allErrs) + } + case "CLONE": + // Sample Err: The PDB "pdb1-clone" is invalid: spec.srcPdbName: Required value: Please specify source PDB for Cloning + if r.Spec.SrcPDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("srcPdbName"), "Please specify source PDB name for Cloning")) + } + if r.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if r.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + /* We don't need this check as ords open the pdb before cloninig */ + /* + if r.Status.OpenMode == "MOUNTED" { + pdblog.Info("Cannot clone: pdb is mount ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + */ + case "PLUG": + if r.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if r.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + 
field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. Values can be a filename convert pattern or NONE")) + } + if r.Spec.SourceFileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("sourceFileNameConversions"), "Please specify a value for sourceFileNameConversions. Values can be a filename convert pattern or NONE")) + } + if r.Spec.CopyAction == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("copyAction"), "Please specify a value for copyAction. Values can be COPY, NOCOPY or MOVE")) + } + if *(r.Spec.TDEImport) { + r.validateTDEInfo(allErrs) + } + case "UNPLUG": + if r.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if *(r.Spec.TDEExport) { + r.validateTDEInfo(allErrs) + } + if r.Status.OpenMode == "READ WRITE" { + pdblog.Info("Cannot unplug: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + r.CheckObjExistence("UNPLUG", allErrs, r) + case "MODIFY": + if r.Spec.PDBState == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbState"), "Please specify target state of PDB")) + } + if r.Spec.ModifyOption == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("modifyOption"), "Please specify an option for opening/closing a PDB")) + } + r.CheckObjExistence("MODIY", allErrs, r) + } +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *PDB) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + pdblog.Info("ValidateUpdate-Validating PDB spec for : " + r.Name) + + isPDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil + if isPDBMarkedToBeDeleted { + return nil, nil + } + + var 
allErrs field.ErrorList + action := strings.ToUpper(r.Spec.Action) + + // If PDB CR has been created and in Ready state, only allow updates if the "action" value has changed as well + if (r.Status.Phase == "Ready") && (r.Status.Action != "MODIFY") && (r.Status.Action != "STATUS") && (r.Status.Action == action) { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("action"), "New action also needs to be specified after PDB is in Ready state")) + } else { + + // Check Common Validations + r.validateCommon(&allErrs) + + // Validate required parameters for Action specified + r.validateAction(&allErrs) + + // Check TDE requirements + if (action != "DELETE") && (action != "MODIFY") && (action != "STATUS") && (*(r.Spec.TDEImport) || *(r.Spec.TDEExport)) { + r.validateTDEInfo(&allErrs) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "PDB"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *PDB) ValidateDelete() (admission.Warnings, error) { + pdblog.Info("ValidateDelete-Validating PDB spec for : " + r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} + +// Validate common specs needed for all PDB Actions +func (r *PDB) validateCommon(allErrs *field.ErrorList) { + pdblog.Info("validateCommon", "name", r.Name) + + if r.Spec.Action == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("action"), "Please specify PDB operation to be performed")) + } + if r.Spec.CDBResName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("cdbResName"), "Please specify the name of the CDB Kubernetes resource to use for PDB operations")) + } + if r.Spec.PDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbName"), "Please specify name of the PDB to be created")) + } +} + +// Validate TDE information for Create, Plug and Unplug Actions +func (r *PDB) validateTDEInfo(allErrs *field.ErrorList) { + pdblog.Info("validateTDEInfo", "name", r.Name) + + if reflect.ValueOf(r.Spec.TDEPassword).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdePassword"), "Please specify a value for tdePassword.")) + } + if r.Spec.TDEKeystorePath == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeKeystorePath"), "Please specify a value for tdeKeystorePath.")) + } + if reflect.ValueOf(r.Spec.TDESecret).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeSecret"), "Please specify a value for tdeSecret.")) + } + +} + +func (r *PDB) CheckObjExistence(action string, allErrs *field.ErrorList, pdb *PDB) { + /* BUG 36752465 - lrest operator - open non-existent pdb creates a lrpdb with status failed */ + pdblog.Info("Action [" + action + "] checkin " + pdb.Spec.PDBName + " existence") + if pdb.Status.OpenMode == "" { + *allErrs = append(*allErrs, field.NotFound(field.NewPath("Spec").Child("PDBName"), " "+pdb.Spec.PDBName+" does not exist : action "+action+" failure")) + + } +} diff --git 
a/apis/database/v1alpha1/shardingdatabase_conversion.go b/apis/database/v1alpha1/shardingdatabase_conversion.go new file mode 100644 index 00000000..d8db75ca --- /dev/null +++ b/apis/database/v1alpha1/shardingdatabase_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *ShardingDatabase) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *ShardingDatabase) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/shardingdatabase_types.go b/apis/database/v1alpha1/shardingdatabase_types.go index c372b991..ae9066fc 100644 --- a/apis/database/v1alpha1/shardingdatabase_types.go +++ b/apis/database/v1alpha1/shardingdatabase_types.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -58,26 +58,43 @@ import ( type ShardingDatabaseSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - Shard []ShardSpec `json:"shard"` - Catalog []CatalogSpec `json:"catalog"` // The catalogSpes accept all the catalog parameters - Gsm []GsmSpec `json:"gsm"` // The GsmSpec will accept all the Gsm parameter - StorageClass string `json:"storageClass,omitempty"` // Optional Accept storage class name - DbImage string `json:"dbImage"` // Accept DB Image name - DbImagePullSecret string `json:"dbImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. - GsmImage string `json:"gsmImage"` // Acccept the GSM image name - GsmImagePullSecret string `json:"gsmImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. 
- Secret string `json:"secret"` // Secret Name to be used with Shard - StagePvcName string `json:"stagePvcName,omitempty"` // the Stagepvc for the backup of cluster - PortMappings []PortMapping `json:"portMappings,omitempty"` // Port mappings for the service that is created. The service is created if there is at least - Namespace string `json:"namespace,omitempty"` // Target namespace of the application. - IsDebug bool `json:"isDebug,omitempty"` // Optional parameter to enable logining - IsExternalSvc bool `json:"isExternalSvc,omitempty"` - IsClone bool `json:"isClone,omitempty"` - IsDataGuard bool `json:"isDataGuard,omitempty"` - ScriptsLocation string `json:"scriptsLocation,omitempty"` - NsConfigMap string `json:"nsConfigMap,omitempty"` - NsSecret string `json:"nsSecret,omitempty"` - IsDeleteOraPvc bool `json:"isDeleteOraPvc,omitempty"` + Shard []ShardSpec `json:"shard"` + Catalog []CatalogSpec `json:"catalog"` // The catalogSpes accept all the catalog parameters + Gsm []GsmSpec `json:"gsm"` // The GsmSpec will accept all the Gsm parameter + StorageClass string `json:"storageClass,omitempty"` // Optional Accept storage class name + DbImage string `json:"dbImage"` // Accept DB Image name + DbImagePullSecret string `json:"dbImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. + GsmImage string `json:"gsmImage"` // Acccept the GSM image name + GsmImagePullSecret string `json:"gsmImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. + StagePvcName string `json:"stagePvcName,omitempty"` // the Stagepvc for the backup of cluster + PortMappings []PortMapping `json:"portMappings,omitempty"` // Port mappings for the service that is created. 
The service is created if there is at least + IsDebug bool `json:"isDebug,omitempty"` // Optional parameter to enable logining + IsExternalSvc bool `json:"isExternalSvc,omitempty"` + IsClone bool `json:"isClone,omitempty"` + IsDataGuard bool `json:"isDataGuard,omitempty"` + ScriptsLocation string `json:"scriptsLocation,omitempty"` + IsDeleteOraPvc bool `json:"isDeleteOraPvc,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + LivenessCheckPeriod int `json:"liveinessCheckPeriod,omitempty"` + ReplicationType string `json:"replicationType,omitempty"` + IsDownloadScripts bool `json:"isDownloadScripts,omitempty"` + InvitedNodeSubnetFlag string `json:"invitedNodeSubnetFlag,omitempty"` + InvitedNodeSubnet string `json:"InvitedNodeSubnet,omitempty"` + ShardingType string `json:"shardingType,omitempty"` + GsmShardSpace []GsmShardSpaceSpec `json:"gsmShardSpace,omitempty"` + GsmShardGroup []GsmShardGroupSpec `json:"gsmShardGroup,omitempty"` + ShardRegion []string `json:"shardRegion,omitempty"` + ShardBuddyRegion string `json:"shardBuddyRegion,omitempty"` + GsmService []GsmServiceSpec `json:"gsmService,omitempty"` + ShardConfigName string `json:"shardConfigName,omitempty"` + GsmDevMode string `json:"gsmDevMode,omitempty"` + DbSecret *SecretDetails `json:"dbSecret,omitempty"` // Secret Name to be used with Shard + IsTdeWallet string `json:"isTdeWallet,omitempty"` + TdeWalletPvc string `json:"tdeWalletPvc,omitempty"` + FssStorageClass string `json:"fssStorageClass,omitempty"` + TdeWalletPvcMountLocation string `json:"tdeWalletPvcMountLocation,omitempty"` + DbEdition string `json:"dbEdition,omitempty"` + TopicId string `json:"topicId,omitempty"` } // To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 @@ -88,7 +105,8 @@ type ShardingDatabaseStatus struct { Shard map[string]string `json:"shards,omitempty"` Catalog map[string]string `json:"catalogs,omitempty"` - Gsm GsmStatus 
`json:"gsm,omitempty"` + + Gsm GsmStatus `json:"gsm,omitempty"` // +patchMergeKey=type // +patchStrategy=merge @@ -106,6 +124,12 @@ type GsmStatus struct { Services string `json:"services,omitempty"` } +type GsmShardDetails struct { + Name string `json:"name,omitempty"` + Available string `json:"available,omitempty"` + State string `json:"State,omitempty"` +} + type GsmStatusDetails struct { Name string `json:"name,omitempty"` K8sInternalSvc string `json:"k8sInternalSvc,omitempty"` @@ -118,8 +142,12 @@ type GsmStatusDetails struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status +//+kubebuilder:printcolumn:JSONPath=".status.gsm.state",name="Gsm State",type=string +//+kubebuilder:printcolumn:JSONPath=".status.gsm.services",name="Services",type=string +//+kubebuilder:printcolumn:JSONPath=".status.gsm.shards",name="shards",type=string,priority=1 // ShardingDatabase is the Schema for the shardingdatabases API +// +kubebuilder:resource:path=shardingdatabases,scope=Namespaced type ShardingDatabase struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -140,17 +168,22 @@ type ShardingDatabaseList struct { // ShardSpec is a specification of Shards for an application deployment. // +k8s:openapi-gen=true type ShardSpec struct { - Name string `json:"name"` // Shard name that will be used deploy StatefulSet - StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Shard Storage Size - EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Shards - Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` //Optional resource requirement for the container. 
- PvcName string `json:"pvcName,omitempty"` - Label string `json:"label,omitempty"` - IsDelete bool `json:"isDelete,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` - PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` - ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Name string `json:"name"` // Shard name that will be used deploy StatefulSet + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Shard Storage Size + EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Shards + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` //Optional resource requirement for the container. + PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` + // +kubebuilder:validation:Enum=enable;disable;failed;force + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + ShardSpace string `json:"shardSpace,omitempty"` + ShardGroup string `json:"shardGroup,omitempty"` + ShardRegion string `json:"shardRegion,omitempty"` + DeployAs string `json:"deployAs,omitempty"` } // CatalogSpec defines the desired state of CatalogSpec @@ -162,7 +195,7 @@ type CatalogSpec struct { Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. 
PvcName string `json:"pvcName,omitempty"` Label string `json:"label,omitempty"` - IsDelete bool `json:"isDelete,omitempty"` + IsDelete string `json:"isDelete,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` @@ -174,16 +207,81 @@ type CatalogSpec struct { type GsmSpec struct { Name string `json:"name"` // Gsm name that will be used deploy StatefulSet - Replicas int32 `json:"replicas,omitempty"` // Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. + //Replicas int32 `json:"replicas,omitempty"` // Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for GSM StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // This parameter will not be used if you use OraGsmPvcName Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. PvcName string `json:"pvcName,omitempty"` Label string `json:"label,omitempty"` // Optional GSM Label - IsDelete bool `json:"isDelete,omitempty"` + IsDelete string `json:"isDelete,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Region string `json:"region,omitempty"` + DirectorName string `json:"directorName,omitempty"` +} + +// ShardGroupSpec Specification + +type GsmShardGroupSpec struct { + Name string `json:"name"` // Name of the shardgroup. + Region string `json:"region,omitempty"` + DeployAs string `json:"deployAs,omitempty"` +} + +// ShardSpace Specs +type GsmShardSpaceSpec struct { + Name string `json:"name"` // Name of the shardSpace. 
+ Chunks int `json:"chunks,omitempty"` //chunks is optional + ProtectionMode string `json:"protectionMode,omitempty"` // Data guard protection mode + ShardGroup string `json:"shardGroup,omitempty"` +} + +// Service Definition +type GsmServiceSpec struct { + Name string `json:"name"` // Name of the shardSpace. + Available string `json:"available,omitempty"` + ClbGoal string `json:"clbGoal,omitempty"` + CommitOutcome string `json:"commitOutcome,omitempty"` + DrainTimeout string `json:"drainTimeout,omitempty"` + Dtp string `json:"dtp,omitempty"` + Edition string `json:"edition,omitempty"` + FailoverPrimary string `json:"failoverPrimary,omitempty"` + FailoverRestore string `json:"failoverRestore,omitempty"` + FailoverDelay string `json:"failoverDelay,omitempty"` + FailoverMethod string `json:"failoverMethod,omitempty"` + FailoverRetry string `json:"failoverRetry,omitempty"` + FailoverType string `json:"failoverType,omitempty"` + GdsPool string `json:"gdsPool,omitempty"` + Role string `json:"role,omitempty"` + SessionState string `json:"sessionState,omitempty"` + Lag int `json:"lag,omitempty"` + Locality string `json:"locality,omitempty"` + Notification string `json:"notification,omitempty"` + PdbName string `json:"pdbName,omitempty"` + Policy string `json:"policy,omitempty"` + Preferrred string `json:"preferred,omitempty"` + PreferredAll string `json:"prferredAll,omitempty"` + RegionFailover string `json:"regionFailover,omitempty"` + StopOption string `json:"stopOption,omitempty"` + SqlTrasactionProfile string `json:"sqlTransactionProfile,omitempty"` + TableFamily string `json:"tableFamily,omitempty"` + Retention string `json:"retention,omitempty"` + TfaPolicy string `json:"tfaPolicy,omitempty"` +} + +// Secret Details +type SecretDetails struct { + Name string `json:"name"` // Name of the secret. + KeyFileName string `json:"keyFileName,omitempty"` // Name of the key. 
+ NsConfigMap string `json:"nsConfigMap,omitempty"` + NsSecret string `json:"nsSecret,omitempty"` + PwdFileName string `json:"pwdFileName"` + PwdFileMountLocation string `json:"pwdFileMountLocation,omitempty"` + KeyFileMountLocation string `json:"keyFileMountLocation,omitempty"` + KeySecretName string `json:"keySecretName,omitempty"` + EncryptionType string `json:"encryptionType,omitempty"` } // EnvironmentVariable represents a named variable accessible for containers. @@ -266,7 +364,8 @@ const ( // var var KubeConfigOnce sync.Once -const lastSuccessfulSpec = "lastSuccessfulSpec" +// #const lastSuccessfulSpec = "lastSuccessfulSpec" +const lastSuccessfulSpecOnsInfo = "lastSuccessfulSpeOnsInfo" // GetLastSuccessfulSpec returns spec from the lass successful reconciliation. // Returns nil, nil if there is no lastSuccessfulSpec. @@ -298,7 +397,28 @@ func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpec(kubeClient client.C lastSuccessfulSpec: string(specBytes), } - return annsv1.SetAnnotations(kubeClient, shardingv1, anns) + return annsv1.PatchAnnotations(kubeClient, shardingv1, anns) +} + +// GetLastSuccessfulOnsInfo returns spec from the lass successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulSpec. +func (shardingv1 *ShardingDatabase) GetLastSuccessfulOnsInfo() ([]byte, error) { + val, ok := shardingv1.GetAnnotations()[lastSuccessfulSpecOnsInfo] + if !ok { + return nil, nil + } + specBytes := []byte(val) + return specBytes, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. 
+func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpecOnsInfo(kubeClient client.Client, specBytes []byte) error { + + anns := map[string]string{ + lastSuccessfulSpecOnsInfo: string(specBytes), + } + + return annsv1.PatchAnnotations(kubeClient, shardingv1, anns) } func init() { diff --git a/apis/database/v1alpha1/shardingdatabase_webhook.go b/apis/database/v1alpha1/shardingdatabase_webhook.go new file mode 100644 index 00000000..4e7ea2e7 --- /dev/null +++ b/apis/database/v1alpha1/shardingdatabase_webhook.go @@ -0,0 +1,314 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var shardingdatabaselog = logf.Log.WithName("shardingdatabase-resource") + +func (r *ShardingDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-shardingdatabase,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=shardingdatabases,verbs=create;update,versions=v1alpha1,name=mshardingdatabasev1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &ShardingDatabase{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *ShardingDatabase) Default() { + shardingdatabaselog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. + if r.Spec.GsmDevMode != "" { + r.Spec.GsmDevMode = "dev" + } + + if r.Spec.IsTdeWallet == "" { + r.Spec.IsTdeWallet = "disable" + } + for pindex := range r.Spec.Shard { + if strings.ToLower(r.Spec.Shard[pindex].IsDelete) == "" { + r.Spec.Shard[pindex].IsDelete = "disable" + } + } + +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v1alpha1-shardingdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=shardingdatabases,versions=v1alpha1,name=vshardingdatabasev1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &ShardingDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateCreate() (admission.Warnings, error) { + shardingdatabaselog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. 
+ // Check Secret configuration + var validationErr field.ErrorList + var validationErrs1 field.ErrorList + + //namespaces := db.GetWatchNamespaces() + //_, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + // if len(namespaces) != 0 && !containsNamespace { + // validationErr = append(validationErr, + // field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + // "Oracle database operator doesn't watch over this namespace")) + //} + + if r.Spec.DbSecret == nil { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret"), r.Spec.DbSecret, + "DbSecret cannot be set to nil")) + } else { + if len(r.Spec.DbSecret.Name) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("Name"), r.Spec.DbSecret.Name, + "Secret name cannot be set empty")) + } + if len(r.Spec.DbSecret.PwdFileName) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileName"), r.Spec.DbSecret.PwdFileName, + "Password file name cannot be set empty")) + } + if strings.ToLower(r.Spec.DbSecret.EncryptionType) != "base64" { + if strings.ToLower(r.Spec.DbSecret.KeyFileName) == "" { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileName"), r.Spec.DbSecret.KeyFileName, + "Key file name cannot be empty")) + } + } + + /** + if len(r.Spec.DbSecret.PwdFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileMountLocation"), r.Spec.DbSecret.PwdFileMountLocation, + "Password file mount location cannot be empty")) + } + + if len(r.Spec.DbSecret.KeyFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileMountLocation"), r.Spec.DbSecret.KeyFileMountLocation, + 
"KeyFileMountLocation file mount location cannot be empty")) + } + **/ + } + + if r.Spec.IsTdeWallet == "enable" { + if (len(r.Spec.FssStorageClass) == 0) && (len(r.Spec.TdeWalletPvc) == 0) { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("FssStorageClass"), r.Spec.FssStorageClass, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("TdeWalletPvc"), r.Spec.TdeWalletPvc, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + } + } + + if r.Spec.IsTdeWallet != "" { + if (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "disable") { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("isTdeWallet"), r.Spec.IsTdeWallet, + "isTdeWallet can be set to only \"enable\" or \"disable\"")) + } + } + + validationErrs1 = r.validateShardIsDelete() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateFreeEdition() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateCatalogName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateShardName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + // TODO(user): fill in your validation logic upon object creation. 
+ if len(validationErr) == 0 { + return nil, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "ShardingDatabase"}, + r.Name, validationErr) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + shardingdatabaselog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateDelete() (admission.Warnings, error) { + shardingdatabaselog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} + +// ###### Validation Block ################# + +func (r *ShardingDatabase) validateShardIsDelete() field.ErrorList { + + var validationErrs field.ErrorList + + for pindex := range r.Spec.Shard { + if (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "disable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "failed") { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("shard").Child("isDelete"), r.Spec.Shard[pindex].IsDelete, + "r.Spec.Shard[pindex].IsDelete can be set to only enable|disable|failed")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateFreeEdition() field.ErrorList { + + var validationErrs field.ErrorList + if strings.ToLower(r.Spec.DbEdition) == "free" { + // Shard Spec Checks + for i := 0; i < len(r.Spec.Shard); i++ { + for index, variable := range r.Spec.Shard[i].EnvVars { + if variable.Name == "ORACLE_SID" { + if strings.ToLower(variable.Value) != "free" { + 
validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_PDB value can only be set to freepdb")) + } + } + } + } + // Catalog Spec Checks + for i := 0; i < len(r.Spec.Catalog); i++ { + for index, variable := range r.Spec.Catalog[i].EnvVars { + if variable.Name == "ORACLE_SID" { + if strings.ToLower(variable.Value) != "free" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_PDB value can only be set to freepdb")) + } + } + } + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateShardName() field.ErrorList { + var validationErrs field.ErrorList + + for pindex := range r.Spec.Shard { + if len(r.Spec.Shard[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("shard").Child("Name"), r.Spec.Shard[pindex].Name, + "Shard Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateCatalogName() field.ErrorList { + var 
validationErrs field.ErrorList + + for pindex := range r.Spec.Catalog { + if len(r.Spec.Catalog[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("catalog").Child("Name"), r.Spec.Catalog[pindex].Name, + "Catalog Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} diff --git a/apis/database/v1alpha1/singleinstancedatabase_conversion.go b/apis/database/v1alpha1/singleinstancedatabase_conversion.go new file mode 100644 index 00000000..76968dce --- /dev/null +++ b/apis/database/v1alpha1/singleinstancedatabase_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *SingleInstanceDatabase) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *SingleInstanceDatabase) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/singleinstancedatabase_types.go b/apis/database/v1alpha1/singleinstancedatabase_types.go index 7afb7ae6..36125d37 100644 --- a/apis/database/v1alpha1/singleinstancedatabase_types.go +++ b/apis/database/v1alpha1/singleinstancedatabase_types.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2023 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -50,42 +50,67 @@ type SingleInstanceDatabaseSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - // +kubebuilder:validation:Enum=standard;enterprise + // +kubebuilder:validation:Enum=standard;enterprise;express;free Edition string `json:"edition,omitempty"` - // SID can only have a-z , A-Z, 0-9 . It cant have any special characters + // SID must be alphanumeric (no special characters, only a-z, A-Z, 0-9), and no longer than 12 characters. 
// +k8s:openapi-gen=true // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]+$` - Sid string `json:"sid,omitempty"` - InstallApex bool `json:"installApex,omitempty"` - Charset string `json:"charset,omitempty"` - Pdbname string `json:"pdbName,omitempty"` - LoadBalancer bool `json:"loadBalancer,omitempty"` - FlashBack bool `json:"flashBack,omitempty"` - ArchiveLog bool `json:"archiveLog,omitempty"` - ForceLogging bool `json:"forceLog,omitempty"` - - CloneFrom string `json:"cloneFrom,omitempty"` - ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + // +kubebuilder:validation:MaxLength:=12 + Sid string `json:"sid,omitempty"` + Charset string `json:"charset,omitempty"` + Pdbname string `json:"pdbName,omitempty"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ListenerPort int `json:"listenerPort,omitempty"` + TcpsListenerPort int `json:"tcpsListenerPort,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + FlashBack *bool `json:"flashBack,omitempty"` + ArchiveLog *bool `json:"archiveLog,omitempty"` + ForceLogging *bool `json:"forceLog,omitempty"` + EnableTCPS bool `json:"enableTCPS,omitempty"` + TcpsCertRenewInterval string `json:"tcpsCertRenewInterval,omitempty"` + TcpsTlsSecret string `json:"tcpsTlsSecret,omitempty"` + + PrimaryDatabaseRef string `json:"primaryDatabaseRef,omitempty"` + // +kubebuilder:validation:Enum=primary;standby;clone;truecache + CreateAs string `json:"createAs,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + TrueCacheServices []string `json:"trueCacheServices,omitempty"` // +k8s:openapi-gen=true - // +kubebuilder:validation:Minimum=1 - Replicas int `json:"replicas"` + Replicas int `json:"replicas,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` - AdminPassword SingleInstanceDatabaseAdminPassword `json:"adminPassword"` + AdminPassword SingleInstanceDatabaseAdminPassword 
`json:"adminPassword,omitempty"` Image SingleInstanceDatabaseImage `json:"image"` - Persistence SingleInstanceDatabasePersistence `json:"persistence"` - InitParams SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` + Persistence SingleInstanceDatabasePersistence `json:"persistence,omitempty"` + InitParams *SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` + Resources SingleInstanceDatabaseResources `json:"resources,omitempty"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` +} + +type SingleInstanceDatabaseResource struct { + Cpu string `json:"cpu,omitempty"` + Memory string `json:"memory,omitempty"` +} + +type SingleInstanceDatabaseResources struct { + Requests *SingleInstanceDatabaseResource `json:"requests,omitempty"` + Limits *SingleInstanceDatabaseResource `json:"limits,omitempty"` } // SingleInstanceDatabasePersistence defines the storage size and class for PVC type SingleInstanceDatabasePersistence struct { - Size string `json:"size"` - StorageClass string `json:"storageClass"` - + Size string `json:"size,omitempty"` + StorageClass string `json:"storageClass,omitempty"` // +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany - AccessMode string `json:"accessMode"` + AccessMode string `json:"accessMode,omitempty"` + DatafilesVolumeName string `json:"datafilesVolumeName,omitempty"` + ScriptsVolumeName string `json:"scriptsVolumeName,omitempty"` + VolumeClaimAnnotation string `json:"volumeClaimAnnotation,omitempty"` + SetWritePermissions *bool `json:"setWritePermissions,omitempty"` } // SingleInstanceDatabaseInitParams defines the Init Parameters @@ -101,13 +126,15 @@ type SingleInstanceDatabaseImage struct { Version string `json:"version,omitempty"` PullFrom string `json:"pullFrom"` PullSecrets string `json:"pullSecrets,omitempty"` + PrebuiltDB bool `json:"prebuiltDB,omitempty"` } // SingleInsatnceAdminPassword defines the secret containing Admin Password mapped to secretKey for Database type 
SingleInstanceDatabaseAdminPassword struct { SecretName string `json:"secretName"` - SecretKey string `json:"secretKey"` - KeepSecret bool `json:"keepSecret,omitempty"` + // +kubebuilder:default:="oracle_pwd" + SecretKey string `json:"secretKey,omitempty"` + KeepSecret *bool `json:"keepSecret,omitempty"` } // SingleInstanceDatabaseStatus defines the observed state of SingleInstanceDatabase @@ -115,30 +142,44 @@ type SingleInstanceDatabaseStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file - Nodes []string `json:"nodes,omitempty"` - Role string `json:"role,omitempty"` - Status string `json:"status,omitempty"` - Replicas int `json:"replicas"` - ReleaseUpdate string `json:"releaseUpdate,omitempty"` + Nodes []string `json:"nodes,omitempty"` + Role string `json:"role,omitempty"` + Status string `json:"status,omitempty"` + Replicas int `json:"replicas,omitempty"` + ReleaseUpdate string `json:"releaseUpdate,omitempty"` + DgBroker *string `json:"dgBroker,omitempty"` + // +kubebuilder:default:="false" DatafilesPatched string `json:"datafilesPatched,omitempty"` ConnectString string `json:"connectString,omitempty"` ClusterConnectString string `json:"clusterConnectString,omitempty"` + TcpsConnectString string `json:"tcpsConnectString,omitempty"` StandbyDatabases map[string]string `json:"standbyDatabases,omitempty"` - DatafilesCreated string `json:"datafilesCreated,omitempty"` - Sid string `json:"sid,omitempty"` - Edition string `json:"edition,omitempty"` - Charset string `json:"charset,omitempty"` - Pdbname string `json:"pdbName,omitempty"` - InitSgaSize int `json:"initSgaSize,omitempty"` - InitPgaSize int `json:"initPgaSize,omitempty"` - CloneFrom string `json:"cloneFrom,omitempty"` - FlashBack string `json:"flashBack,omitempty"` - ArchiveLog string `json:"archiveLog,omitempty"` - ForceLogging string `json:"forceLog,omitempty"` - OemExpressUrl string 
`json:"oemExpressUrl,omitempty"` - OrdsReference string `json:"ordsReference,omitempty"` - PdbConnectString string `json:"pdbConnectString,omitempty"` - ApexInstalled bool `json:"apexInstalled,omitempty"` + // +kubebuilder:default:="false" + DatafilesCreated string `json:"datafilesCreated,omitempty"` + Sid string `json:"sid,omitempty"` + Edition string `json:"edition,omitempty"` + Charset string `json:"charset,omitempty"` + Pdbname string `json:"pdbName,omitempty"` + InitSgaSize int `json:"initSgaSize,omitempty"` + InitPgaSize int `json:"initPgaSize,omitempty"` + CreatedAs string `json:"createdAs,omitempty"` + FlashBack string `json:"flashBack,omitempty"` + ArchiveLog string `json:"archiveLog,omitempty"` + ForceLogging string `json:"forceLog,omitempty"` + OemExpressUrl string `json:"oemExpressUrl,omitempty"` + OrdsReference string `json:"ordsReference,omitempty"` + PdbConnectString string `json:"pdbConnectString,omitempty"` + TcpsPdbConnectString string `json:"tcpsPdbConnectString,omitempty"` + ApexInstalled bool `json:"apexInstalled,omitempty"` + PrebuiltDB bool `json:"prebuiltDB,omitempty"` + // +kubebuilder:default:=false + IsTcpsEnabled bool `json:"isTcpsEnabled"` + CertCreationTimestamp string `json:"certCreationTimestamp,omitempty"` + CertRenewInterval string `json:"certRenewInterval,omitempty"` + ClientWalletLoc string `json:"clientWalletLoc,omitempty"` + PrimaryDatabase string `json:"primaryDatabase,omitempty"` + // +kubebuilder:default:="" + TcpsTlsSecret string `json:"tcpsTlsSecret"` // +patchMergeKey=type // +patchStrategy=merge @@ -148,17 +189,22 @@ type SingleInstanceDatabaseStatus struct { InitParams SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` Persistence SingleInstanceDatabasePersistence `json:"persistence"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` } //+kubebuilder:object:root=true //+kubebuilder:subresource:status // 
+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas // +kubebuilder:printcolumn:JSONPath=".status.edition",name="Edition",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.sid",name="Sid",type="string",priority=1 // +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" -// +kubebuilder:printcolumn:JSONPath=".status.role",name="Role",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.role",name="Role",type="string" // +kubebuilder:printcolumn:JSONPath=".status.releaseUpdate",name="Version",type="string" // +kubebuilder:printcolumn:JSONPath=".status.connectString",name="Connect Str",type="string" // +kubebuilder:printcolumn:JSONPath=".status.pdbConnectString",name="Pdb Connect Str",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.tcpsConnectString",name="TCPS Connect Str",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.tcpsPdbConnectString",name="TCPS Pdb Connect Str",type="string", priority=1 // +kubebuilder:printcolumn:JSONPath=".status.oemExpressUrl",name="Oem Express Url",type="string" // SingleInstanceDatabase is the Schema for the singleinstancedatabases API diff --git a/apis/database/v1alpha1/singleinstancedatabase_webhook.go b/apis/database/v1alpha1/singleinstancedatabase_webhook.go index 1f7b44bb..bc095f7c 100644 --- a/apis/database/v1alpha1/singleinstancedatabase_webhook.go +++ b/apis/database/v1alpha1/singleinstancedatabase_webhook.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2023 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -39,7 +39,9 @@ package v1alpha1 import ( + "strconv" "strings" + "time" dbcommons "github.com/oracle/oracle-database-operator/commons/database" @@ -50,6 +52,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. @@ -71,10 +74,71 @@ var _ webhook.Defaulter = &SingleInstanceDatabase{} func (r *SingleInstanceDatabase) Default() { singleinstancedatabaselog.Info("default", "name", r.Name) - if r.Spec.Edition == "express" { - r.Spec.Replicas = 1 + if r.Spec.LoadBalancer { + // Annotations required for a flexible load balancer on oci + if r.Spec.ServiceAnnotations == nil { + r.Spec.ServiceAnnotations = make(map[string]string) + } + _, ok := r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape"] + if !ok { + r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape"] = "flexible" + } + _, ok = r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-min"] + if !ok { + r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-min"] = "10" + } + _, ok = r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-max"] + if !ok { + r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-max"] = "100" + } + } + + if r.Spec.AdminPassword.KeepSecret == nil { + keepSecret := true + r.Spec.AdminPassword.KeepSecret = &keepSecret + } + + if r.Spec.Edition == "" { + if r.Spec.CreateAs == "clone" && !r.Spec.Image.PrebuiltDB { + r.Spec.Edition = "enterprise" + } + } + + if r.Spec.CreateAs == "" { + r.Spec.CreateAs = "primary" + } + + if r.Spec.Sid == "" { + if r.Spec.Edition == "express" { + r.Spec.Sid = "XE" + } else if r.Spec.Edition == "free" { + r.Spec.Sid = "FREE" + } else { + r.Spec.Sid = 
"ORCLCDB" + } + } + + if r.Spec.Pdbname == "" { + if r.Spec.Edition == "express" { + r.Spec.Pdbname = "XEPDB1" + } else if r.Spec.Edition == "free" { + r.Spec.Pdbname = "FREEPDB1" + } else { + r.Spec.Pdbname = "ORCLPDB1" + } + } + + if r.Spec.Edition == "express" || r.Spec.Edition == "free" { + // Allow zero replicas as a means to bounce the DB + if r.Status.Replicas == 1 && r.Spec.Replicas > 1 { + // If not zero, default the replicas to 1 + r.Spec.Replicas = 1 + } + } + + if r.Spec.TrueCacheServices == nil { + r.Spec.TrueCacheServices = make([]string, 0) } - // TODO(user): fill in your defaulting logic. } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. @@ -83,67 +147,355 @@ func (r *SingleInstanceDatabase) Default() { var _ webhook.Validator = &SingleInstanceDatabase{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *SingleInstanceDatabase) ValidateCreate() error { +func (r *SingleInstanceDatabase) ValidateCreate() (admission.Warnings, error) { singleinstancedatabaselog.Info("validate create", "name", r.Name) var allErrs field.ErrorList - if r.Spec.Persistence.AccessMode == "ReadWriteOnce" && r.Spec.Replicas != 1 { + namespaces := dbcommons.GetWatchNamespaces() + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + // Persistence spec validation + if r.Spec.Persistence.Size == "" && (r.Spec.Persistence.AccessMode != "" || + r.Spec.Persistence.StorageClass != "" || r.Spec.Persistence.DatafilesVolumeName != "") { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("size"), r.Spec.Persistence, + "invalid persistence specification, 
specify required size")) + } + + if r.Spec.Persistence.Size != "" { + if r.Spec.Persistence.AccessMode == "" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("size"), r.Spec.Persistence, + "invalid persistence specification, specify accessMode")) + } + if r.Spec.Persistence.AccessMode != "ReadWriteMany" && r.Spec.Persistence.AccessMode != "ReadWriteOnce" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("accessMode"), + r.Spec.Persistence.AccessMode, "should be either \"ReadWriteOnce\" or \"ReadWriteMany\"")) + } + } + + if r.Spec.CreateAs == "standby" { + if r.Spec.ArchiveLog != nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("archiveLog"), + r.Spec.ArchiveLog, "archiveLog cannot be specified for standby databases")) + } + if r.Spec.FlashBack != nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("flashBack"), + r.Spec.FlashBack, "flashBack cannot be specified for standby databases")) + } + if r.Spec.ForceLogging != nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("forceLog"), + r.Spec.ForceLogging, "forceLog cannot be specified for standby databases")) + } + if r.Spec.InitParams != nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("initParams"), + r.Spec.InitParams, "initParams cannot be specified for standby databases")) + } + if r.Spec.Persistence.ScriptsVolumeName != "" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("scriptsVolumeName"), + r.Spec.Persistence.ScriptsVolumeName, "scriptsVolumeName cannot be specified for standby databases")) + } + if r.Spec.EnableTCPS { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("enableTCPS"), + r.Spec.EnableTCPS, "enableTCPS cannot be specified for standby databases")) + } + + } + + // Replica validation + if r.Spec.Replicas > 1 { + valMsg := "" + if 
r.Spec.Edition == "express" || r.Spec.Edition == "free" { + valMsg = "should be 1 for " + r.Spec.Edition + " edition" + } + if r.Spec.Persistence.Size == "" { + valMsg = "should be 1 if no persistence is specified" + } + if valMsg != "" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("replicas"), r.Spec.Replicas, valMsg)) + } + } + + if (r.Spec.CreateAs == "clone" || r.Spec.CreateAs == "standby") && r.Spec.PrimaryDatabaseRef == "" { allErrs = append(allErrs, - field.Invalid(field.NewPath("spec").Child("replicas"), r.Spec.Replicas, - "should be 1 for accessMode \"ReadWriteOnce\"")) + field.Invalid(field.NewPath("spec").Child("primaryDatabaseRef"), r.Spec.PrimaryDatabaseRef, "Primary Database reference cannot be null for a secondary database")) + } + + if r.Spec.Edition == "express" || r.Spec.Edition == "free" { + if r.Spec.CreateAs == "clone" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("createAs"), r.Spec.CreateAs, + "Cloning not supported for "+r.Spec.Edition+" edition")) + } + if r.Spec.CreateAs == "standby" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("createAs"), r.Spec.CreateAs, + "Physical Standby Database creation is not supported for "+r.Spec.Edition+" edition")) + } + if r.Spec.Edition == "express" && strings.ToUpper(r.Spec.Sid) != "XE" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("sid"), r.Spec.Sid, + "Express edition SID must only be XE")) + } + if r.Spec.Edition == "free" && strings.ToUpper(r.Spec.Sid) != "FREE" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("sid"), r.Spec.Sid, + "Free edition SID must only be FREE")) + } + if r.Spec.Edition == "express" && strings.ToUpper(r.Spec.Pdbname) != "XEPDB1" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("pdbName"), r.Spec.Pdbname, + "Express edition PDB must be XEPDB1")) + } + if r.Spec.Edition == "free" && strings.ToUpper(r.Spec.Pdbname) != 
"FREEPDB1" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("pdbName"), r.Spec.Pdbname, + "Free edition PDB must be FREEPDB1")) + } + if r.Spec.InitParams != nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("initParams"), *r.Spec.InitParams, + r.Spec.Edition+" edition does not support changing init parameters")) + } + } else { + if r.Spec.Sid == "XE" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("sid"), r.Spec.Sid, + "XE is reserved as the SID for Express edition of the database")) + } + if r.Spec.Sid == "FREE" { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("sid"), r.Spec.Sid, + "FREE is reserved as the SID for FREE edition of the database")) + } + } + + if r.Spec.CreateAs == "clone" { + if r.Spec.Image.PrebuiltDB { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("createAs"), r.Spec.CreateAs, + "cannot clone to create a prebuilt db")) + } else if strings.Contains(r.Spec.PrimaryDatabaseRef, ":") && strings.Contains(r.Spec.PrimaryDatabaseRef, "/") && r.Spec.Edition == "" { + //Edition must be passed when cloning from a source database other than same k8s cluster + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("edition"), r.Spec.CreateAs, + "Edition must be passed when cloning from a source database other than same k8s cluster")) + } + } + + if r.Spec.CreateAs != "truecache" { + if len(r.Spec.TrueCacheServices) > 0 { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("trueCacheServices"), r.Spec.TrueCacheServices, + "Creation of trueCacheServices only supported with True Cache instances")) + } + } + + if r.Status.FlashBack == "true" && r.Spec.FlashBack != nil && *r.Spec.FlashBack { + if r.Spec.ArchiveLog != nil && !*r.Spec.ArchiveLog { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("archiveLog"), r.Spec.ArchiveLog, + "Cannot disable Archivelog. 
Please disable Flashback first.")) + } + } + + if r.Status.ArchiveLog == "false" && r.Spec.ArchiveLog != nil && !*r.Spec.ArchiveLog { + if *r.Spec.FlashBack { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("flashBack"), r.Spec.FlashBack, + "Cannot enable Flashback. Please enable Archivelog first.")) + } } - if r.Spec.Edition == "express" && r.Spec.CloneFrom != "" { + + if r.Spec.Persistence.VolumeClaimAnnotation != "" { + strParts := strings.Split(r.Spec.Persistence.VolumeClaimAnnotation, ":") + if len(strParts) != 2 { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("persistence").Child("volumeClaimAnnotation"), r.Spec.Persistence.VolumeClaimAnnotation, + "volumeClaimAnnotation should be in : format.")) + } + } + + // servicePort and tcpServicePort validation + if !r.Spec.LoadBalancer { + // NodePort service is expected. In this case servicePort should be in range 30000-32767 + if r.Spec.ListenerPort != 0 && (r.Spec.ListenerPort < 30000 || r.Spec.ListenerPort > 32767) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("listenerPort"), r.Spec.ListenerPort, + "listenerPort should be in 30000-32767 range.")) + } + if r.Spec.EnableTCPS && r.Spec.TcpsListenerPort != 0 && (r.Spec.TcpsListenerPort < 30000 || r.Spec.TcpsListenerPort > 32767) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("tcpsListenerPort"), r.Spec.TcpsListenerPort, + "tcpsListenerPort should be in 30000-32767 range.")) + } + } else { + // LoadBalancer Service is expected. 
+ if r.Spec.EnableTCPS && r.Spec.TcpsListenerPort == 0 && r.Spec.ListenerPort == int(dbcommons.CONTAINER_TCPS_PORT) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("listenerPort"), r.Spec.ListenerPort, + "listenerPort can not be 2484 as the default port for tcpsListenerPort is 2484.")) + } + } + + if r.Spec.EnableTCPS && r.Spec.ListenerPort != 0 && r.Spec.TcpsListenerPort != 0 && r.Spec.ListenerPort == r.Spec.TcpsListenerPort { allErrs = append(allErrs, - field.Invalid(field.NewPath("spec").Child("cloneFrom"), r.Spec.CloneFrom, - "Cloning not supported for Express edition")) + field.Invalid(field.NewPath("spec").Child("tcpsListenerPort"), r.Spec.TcpsListenerPort, + "listenerPort and tcpsListenerPort can not be equal.")) + } + + // Certificate Renew Duration Validation + if r.Spec.EnableTCPS && r.Spec.TcpsCertRenewInterval != "" { + duration, err := time.ParseDuration(r.Spec.TcpsCertRenewInterval) + if err != nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("tcpsCertRenewInterval"), r.Spec.TcpsCertRenewInterval, + "Please provide valid string to parse the tcpsCertRenewInterval.")) + } + maxLimit, _ := time.ParseDuration("8760h") + minLimit, _ := time.ParseDuration("24h") + if duration > maxLimit || duration < minLimit { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("tcpsCertRenewInterval"), r.Spec.TcpsCertRenewInterval, + "Please specify tcpsCertRenewInterval in the range: 24h to 8760h")) + } } - if r.Spec.Edition == "express" && strings.ToUpper(r.Spec.Sid) != "XE" { + + // tcpsTlsSecret validations + if !r.Spec.EnableTCPS && r.Spec.TcpsTlsSecret != "" { allErrs = append(allErrs, - field.Invalid(field.NewPath("spec").Child("sid"), r.Spec.Sid, - "Express edition SID must be XE")) + field.Forbidden(field.NewPath("spec").Child("tcpsTlsSecret"), + " is allowed only if enableTCPS is true")) } - if r.Spec.Edition == "express" && strings.ToUpper(r.Spec.Pdbname) != "XEPDB1" { + if 
r.Spec.TcpsTlsSecret != "" && r.Spec.TcpsCertRenewInterval != "" { allErrs = append(allErrs, - field.Invalid(field.NewPath("spec").Child("pdbName"), r.Spec.Pdbname, - "Express edition PDB must be XEPDB1")) + field.Forbidden(field.NewPath("spec").Child("tcpsCertRenewInterval"), + " is applicable only for self signed certs")) + } + + if r.Spec.InitParams != nil { + if (r.Spec.InitParams.PgaAggregateTarget != 0 && r.Spec.InitParams.SgaTarget == 0) || (r.Spec.InitParams.PgaAggregateTarget == 0 && r.Spec.InitParams.SgaTarget != 0) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("initParams"), + r.Spec.InitParams, "initParams value invalid : Provide values for both pgaAggregateTarget and SgaTarget")) + } } if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid( + + return nil, apierrors.NewInvalid( schema.GroupKind{Group: "database.oracle.com", Kind: "SingleInstanceDatabase"}, r.Name, allErrs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *SingleInstanceDatabase) ValidateUpdate(oldRuntimeObject runtime.Object) error { +func (r *SingleInstanceDatabase) ValidateUpdate(oldRuntimeObject runtime.Object) (admission.Warnings, error) { singleinstancedatabaselog.Info("validate update", "name", r.Name) var allErrs field.ErrorList // check creation validations first - err := r.ValidateCreate() + warnings, err := r.ValidateCreate() if err != nil { - return err + return warnings, err } // Validate Deletion if r.GetDeletionTimestamp() != nil { - err := r.ValidateDelete() + warnings, err := r.ValidateDelete() if err != nil { - return err + return warnings, err } } + // Now check for updation errors old, ok := oldRuntimeObject.(*SingleInstanceDatabase) if !ok { - return nil + return nil, nil } - edition := r.Spec.Edition - if r.Spec.Edition == "" { - edition = "Enterprise" + + if old.Status.CreatedAs == "clone" { + if r.Spec.Edition != "" && old.Status.Edition != "" && 
!strings.EqualFold(old.Status.Edition, r.Spec.Edition) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("edition"), "Edition of a cloned singleinstancedatabase cannot be changed post creation")) + } + + if !strings.EqualFold(old.Status.PrimaryDatabase, r.Spec.PrimaryDatabaseRef) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("primaryDatabaseRef"), "Primary database of a cloned singleinstancedatabase cannot be changed post creation")) + } + } + + if old.Status.Role != dbcommons.ValueUnavailable && old.Status.Role != "PRIMARY" { + // Restricting Patching of secondary databases archiveLog, forceLog, flashBack + statusArchiveLog, _ := strconv.ParseBool(old.Status.ArchiveLog) + if r.Spec.ArchiveLog != nil && (statusArchiveLog != *r.Spec.ArchiveLog) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("archiveLog"), "cannot be changed")) + } + statusFlashBack, _ := strconv.ParseBool(old.Status.FlashBack) + if r.Spec.FlashBack != nil && (statusFlashBack != *r.Spec.FlashBack) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("flashBack"), "cannot be changed")) + } + statusForceLogging, _ := strconv.ParseBool(old.Status.ForceLogging) + if r.Spec.ForceLogging != nil && (statusForceLogging != *r.Spec.ForceLogging) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("forceLog"), "cannot be changed")) + } + + // Restricting Patching of secondary databases InitParams + if r.Spec.InitParams != nil { + if old.Status.InitParams.SgaTarget != r.Spec.InitParams.SgaTarget { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("initParams").Child("sgaTarget"), "cannot be changed")) + } + if old.Status.InitParams.PgaAggregateTarget != r.Spec.InitParams.PgaAggregateTarget { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("initParams").Child("pgaAggregateTarget"), "cannot be changed")) + } + if 
old.Status.InitParams.CpuCount != r.Spec.InitParams.CpuCount { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("initParams").Child("cpuCount"), "cannot be changed")) + } + if old.Status.InitParams.Processes != r.Spec.InitParams.Processes { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("initParams").Child("processes"), "cannot be changed")) + } + } + } + + // if Db is in a dataguard configuration or referred by Standby databases then Restrict enabling Tcps on the Primary DB + if r.Spec.EnableTCPS { + if old.Status.DgBroker != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("enableTCPS"), "cannot enable tcps as database is in a dataguard configuration")) + } else if len(old.Status.StandbyDatabases) != 0 { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("enableTCPS"), "cannot enable tcps as database is referred by one or more standby databases")) + } } - if r.Spec.CloneFrom == "" && old.Status.Edition != "" && !strings.EqualFold(old.Status.Edition, edition) { + + if old.Status.DatafilesCreated == "true" && (old.Status.PrebuiltDB != r.Spec.Image.PrebuiltDB) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("image").Child("prebuiltDB"), "cannot be changed")) + } + if r.Spec.Edition != "" && old.Status.Edition != "" && !strings.EqualFold(old.Status.Edition, r.Spec.Edition) { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("edition"), "cannot be changed")) } @@ -159,37 +511,43 @@ func (r *SingleInstanceDatabase) ValidateUpdate(oldRuntimeObject runtime.Object) allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("pdbname"), "cannot be changed")) } - if old.Status.CloneFrom != "" && - (old.Status.CloneFrom == dbcommons.NoCloneRef && r.Spec.CloneFrom != "" || - old.Status.CloneFrom != dbcommons.NoCloneRef && old.Status.CloneFrom != r.Spec.CloneFrom) { + if old.Status.CreatedAs == "clone" && + 
(old.Status.PrimaryDatabase == dbcommons.ValueUnavailable && r.Spec.PrimaryDatabaseRef != "" || + old.Status.PrimaryDatabase != dbcommons.ValueUnavailable && old.Status.PrimaryDatabase != r.Spec.PrimaryDatabaseRef) { allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("cloneFrom"), "cannot be changed")) + field.Forbidden(field.NewPath("spec").Child("primaryDatabaseRef"), "cannot be changed")) } if old.Status.OrdsReference != "" && r.Status.Persistence != r.Spec.Persistence { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("persistence"), "uninstall ORDS to change Persistence")) } + + if old.Status.Replicas != r.Spec.Replicas && old.Status.DgBroker != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("replicas"), "cannot be updated for a database in a Data Guard configuration")) + } + if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( schema.GroupKind{Group: "database.oracle.com", Kind: "SingleInstanceDatabase"}, r.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *SingleInstanceDatabase) ValidateDelete() error { +func (r *SingleInstanceDatabase) ValidateDelete() (admission.Warnings, error) { singleinstancedatabaselog.Info("validate delete", "name", r.Name) var allErrs field.ErrorList if r.Status.OrdsReference != "" { allErrs = append(allErrs, - field.Forbidden(field.NewPath("status").Child("ordsInstalled"), "uninstall ORDS to cleanup this SIDB")) + field.Forbidden(field.NewPath("status").Child("ordsReference"), "delete "+r.Status.OrdsReference+" to cleanup this SIDB")) } if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( schema.GroupKind{Group: "database.oracle.com", Kind: "SingleInstanceDatabase"}, r.Name, allErrs) } diff --git a/apis/database/v1alpha1/webhook_suite_test.go 
b/apis/database/v1alpha1/webhook_suite_test.go new file mode 100644 index 00000000..e28925e6 --- /dev/null +++ b/apis/database/v1alpha1/webhook_suite_test.go @@ -0,0 +1,212 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net" + "path/filepath" + "testing" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + admissionv1 "k8s.io/api/admission/v1" + //+kubebuilder:scaffold:imports + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// To avoid dot import +var ( + BeforeSuite = ginkgo.BeforeSuite + AfterSuite = ginkgo.AfterSuite + Describe = ginkgo.Describe + PDescribe = ginkgo.PDescribe + JustBeforeEach = ginkgo.JustBeforeEach + BeforeEach = ginkgo.BeforeEach + AfterEach = ginkgo.AfterEach + Context = ginkgo.Context + By = ginkgo.By + It = ginkgo.It + FIt = ginkgo.FIt + PIt = ginkgo.PIt + Eventually = gomega.Eventually + Expect = gomega.Expect + Succeed = gomega.Succeed + HaveOccurred = gomega.HaveOccurred + BeNil = gomega.BeNil + Equal = gomega.Equal + BeTrue = gomega.BeTrue + BeFalse = gomega.BeFalse + ContainSubstring = gomega.ContainSubstring +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "..", "config", "webhook")}, + }, + } + + cfg, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + scheme := runtime.NewScheme() + err = AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + err = admissionv1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // start webhook server using Manager + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + WebhookServer: webhook.NewServer(webhook.Options{ + Port: webhookInstallOptions.LocalServingPort, + Host: webhookInstallOptions.LocalServingHost, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + LeaderElection: false, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + }) + Expect(err).NotTo(HaveOccurred()) + + err = (&AutonomousDatabase{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + err = (&AutonomousDatabaseBackup{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + err = (&AutonomousDatabaseRestore{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + 
+ err = (&AutonomousContainerDatabase{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:webhook + + go func() { + defer ginkgo.GinkgoRecover() + err = mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + conn.Close() + return nil + }).Should(Succeed()) +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +func validateInvalidTest(obj client.Object, isUpdate bool, expMsgList ...string) { + var err error + + if !isUpdate { + err = k8sClient.Create(context.TODO(), obj) + } else { + err = k8sClient.Update(context.TODO(), obj) + } + + jsonBytes, jsonErr := json.MarshalIndent(obj, "", " ") + Expect(jsonErr).ToNot(HaveOccurred()) + Expect(err).To(HaveOccurred(), "%s: %v", obj.GetObjectKind().GroupVersionKind().Kind, string(jsonBytes)) + + statusErr := &k8sErrors.StatusError{} + Expect(errors.As(err, &statusErr)).To(BeTrue()) + + for _, msg := range expMsgList { + Expect(statusErr.ErrStatus.Message).To(ContainSubstring(msg)) + } +} diff --git a/apis/database/v1alpha1/zz_generated.deepcopy.go b/apis/database/v1alpha1/zz_generated.deepcopy.go index 2766d559..b20cf834 100644 --- a/apis/database/v1alpha1/zz_generated.deepcopy.go +++ b/apis/database/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ -// +build !ignore_autogenerated +//go:build !ignore_autogenerated /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -48,8 +48,1947 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcdSpec) DeepCopyInto(out *AcdSpec) { + *out = *in + in.K8sAcd.DeepCopyInto(&out.K8sAcd) + in.OciAcd.DeepCopyInto(&out.OciAcd) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcdSpec. +func (in *AcdSpec) DeepCopy() *AcdSpec { + if in == nil { + return nil + } + out := new(AcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousContainerDatabase) DeepCopyInto(out *AutonomousContainerDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabase. +func (in *AutonomousContainerDatabase) DeepCopy() *AutonomousContainerDatabase { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousContainerDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousContainerDatabaseList) DeepCopyInto(out *AutonomousContainerDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousContainerDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseList. +func (in *AutonomousContainerDatabaseList) DeepCopy() *AutonomousContainerDatabaseList { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousContainerDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousContainerDatabaseSpec) DeepCopyInto(out *AutonomousContainerDatabaseSpec) { + *out = *in + if in.AutonomousContainerDatabaseOCID != nil { + in, out := &in.AutonomousContainerDatabaseOCID, &out.AutonomousContainerDatabaseOCID + *out = new(string) + **out = **in + } + if in.CompartmentOCID != nil { + in, out := &in.CompartmentOCID, &out.CompartmentOCID + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.AutonomousExadataVMClusterOCID != nil { + in, out := &in.AutonomousExadataVMClusterOCID, &out.AutonomousExadataVMClusterOCID + *out = new(string) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.OCIConfig.DeepCopyInto(&out.OCIConfig) + if in.HardLink != nil { + in, out := &in.HardLink, &out.HardLink + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseSpec. +func (in *AutonomousContainerDatabaseSpec) DeepCopy() *AutonomousContainerDatabaseSpec { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousContainerDatabaseStatus) DeepCopyInto(out *AutonomousContainerDatabaseStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseStatus. 
+func (in *AutonomousContainerDatabaseStatus) DeepCopy() *AutonomousContainerDatabaseStatus { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AutonomousDatabase) DeepCopyInto(out *AutonomousDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabase. +func (in *AutonomousDatabase) DeepCopy() *AutonomousDatabase { + if in == nil { + return nil + } + out := new(AutonomousDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackup) DeepCopyInto(out *AutonomousDatabaseBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackup. +func (in *AutonomousDatabaseBackup) DeepCopy() *AutonomousDatabaseBackup { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AutonomousDatabaseBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackupList) DeepCopyInto(out *AutonomousDatabaseBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousDatabaseBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupList. +func (in *AutonomousDatabaseBackupList) DeepCopy() *AutonomousDatabaseBackupList { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabaseBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseBackupSpec) DeepCopyInto(out *AutonomousDatabaseBackupSpec) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.AutonomousDatabaseBackupOCID != nil { + in, out := &in.AutonomousDatabaseBackupOCID, &out.AutonomousDatabaseBackupOCID + *out = new(string) + **out = **in + } + if in.IsLongTermBackup != nil { + in, out := &in.IsLongTermBackup, &out.IsLongTermBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodInDays != nil { + in, out := &in.RetentionPeriodInDays, &out.RetentionPeriodInDays + *out = new(int) + **out = **in + } + in.OCIConfig.DeepCopyInto(&out.OCIConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupSpec. +func (in *AutonomousDatabaseBackupSpec) DeepCopy() *AutonomousDatabaseBackupSpec { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackupStatus) DeepCopyInto(out *AutonomousDatabaseBackupStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupStatus. +func (in *AutonomousDatabaseBackupStatus) DeepCopy() *AutonomousDatabaseBackupStatus { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseBase) DeepCopyInto(out *AutonomousDatabaseBase) { + *out = *in + if in.CompartmentId != nil { + in, out := &in.CompartmentId, &out.CompartmentId + *out = new(string) + **out = **in + } + in.AutonomousContainerDatabase.DeepCopyInto(&out.AutonomousContainerDatabase) + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DbName != nil { + in, out := &in.DbName, &out.DbName + *out = new(string) + **out = **in + } + if in.DbVersion != nil { + in, out := &in.DbVersion, &out.DbVersion + *out = new(string) + **out = **in + } + if in.DataStorageSizeInTBs != nil { + in, out := &in.DataStorageSizeInTBs, &out.DataStorageSizeInTBs + *out = new(int) + **out = **in + } + if in.CpuCoreCount != nil { + in, out := &in.CpuCoreCount, &out.CpuCoreCount + *out = new(int) + **out = **in + } + if in.ComputeCount != nil { + in, out := &in.ComputeCount, &out.ComputeCount + *out = new(float32) + **out = **in + } + if in.OcpuCount != nil { + in, out := &in.OcpuCount, &out.OcpuCount + *out = new(float32) + **out = **in + } + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + if in.IsAutoScalingEnabled != nil { + in, out := &in.IsAutoScalingEnabled, &out.IsAutoScalingEnabled + *out = new(bool) + **out = **in + } + if in.IsDedicated != nil { + in, out := &in.IsDedicated, &out.IsDedicated + *out = new(bool) + **out = **in + } + if in.IsFreeTier != nil { + in, out := &in.IsFreeTier, &out.IsFreeTier + *out = new(bool) + **out = **in + } + if in.IsAccessControlEnabled != nil { + in, out := &in.IsAccessControlEnabled, &out.IsAccessControlEnabled + *out = new(bool) + **out = **in + } + if in.WhitelistedIps != nil { + in, out := &in.WhitelistedIps, &out.WhitelistedIps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SubnetId != nil { + in, out := &in.SubnetId, &out.SubnetId + *out = new(string) + **out = **in + } + if in.NsgIds != nil { + in, out := &in.NsgIds, &out.NsgIds + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.PrivateEndpointLabel != nil { + in, out := &in.PrivateEndpointLabel, &out.PrivateEndpointLabel + *out = new(string) + **out = **in + } + if in.IsMtlsConnectionRequired != nil { + in, out := &in.IsMtlsConnectionRequired, &out.IsMtlsConnectionRequired + *out = new(bool) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBase. +func (in *AutonomousDatabaseBase) DeepCopy() *AutonomousDatabaseBase { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseClone) DeepCopyInto(out *AutonomousDatabaseClone) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseClone. +func (in *AutonomousDatabaseClone) DeepCopy() *AutonomousDatabaseClone { + if in == nil { + return nil + } + out := new(AutonomousDatabaseClone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseDetails. 
+func (in *AutonomousDatabaseDetails) DeepCopy() *AutonomousDatabaseDetails { + if in == nil { + return nil + } + out := new(AutonomousDatabaseDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseList) DeepCopyInto(out *AutonomousDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseList. +func (in *AutonomousDatabaseList) DeepCopy() *AutonomousDatabaseList { + if in == nil { + return nil + } + out := new(AutonomousDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseRestore) DeepCopyInto(out *AutonomousDatabaseRestore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestore. +func (in *AutonomousDatabaseRestore) DeepCopy() *AutonomousDatabaseRestore { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AutonomousDatabaseRestore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseRestoreList) DeepCopyInto(out *AutonomousDatabaseRestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousDatabaseRestore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreList. +func (in *AutonomousDatabaseRestoreList) DeepCopy() *AutonomousDatabaseRestoreList { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabaseRestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseRestoreSpec) DeepCopyInto(out *AutonomousDatabaseRestoreSpec) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + in.Source.DeepCopyInto(&out.Source) + in.OCIConfig.DeepCopyInto(&out.OCIConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreSpec. +func (in *AutonomousDatabaseRestoreSpec) DeepCopy() *AutonomousDatabaseRestoreSpec { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseRestoreStatus) DeepCopyInto(out *AutonomousDatabaseRestoreStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreStatus. +func (in *AutonomousDatabaseRestoreStatus) DeepCopy() *AutonomousDatabaseRestoreStatus { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseSpec) DeepCopyInto(out *AutonomousDatabaseSpec) { + *out = *in + in.Details.DeepCopyInto(&out.Details) + in.Clone.DeepCopyInto(&out.Clone) + in.Wallet.DeepCopyInto(&out.Wallet) + in.OciConfig.DeepCopyInto(&out.OciConfig) + if in.HardLink != nil { + in, out := &in.HardLink, &out.HardLink + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseSpec. +func (in *AutonomousDatabaseSpec) DeepCopy() *AutonomousDatabaseSpec { + if in == nil { + return nil + } + out := new(AutonomousDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseStatus) DeepCopyInto(out *AutonomousDatabaseStatus) { + *out = *in + if in.AllConnectionStrings != nil { + in, out := &in.AllConnectionStrings, &out.AllConnectionStrings + *out = make([]ConnectionStringProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseStatus. 
+func (in *AutonomousDatabaseStatus) DeepCopy() *AutonomousDatabaseStatus { + if in == nil { + return nil + } + out := new(AutonomousDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backupconfig) DeepCopyInto(out *Backupconfig) { + *out = *in + if in.AutoBackupEnabled != nil { + in, out := &in.AutoBackupEnabled, &out.AutoBackupEnabled + *out = new(bool) + **out = **in + } + if in.RecoveryWindowsInDays != nil { + in, out := &in.RecoveryWindowsInDays, &out.RecoveryWindowsInDays + *out = new(int) + **out = **in + } + if in.AutoBackupWindow != nil { + in, out := &in.AutoBackupWindow, &out.AutoBackupWindow + *out = new(string) + **out = **in + } + if in.BackupDestinationDetails != nil { + in, out := &in.BackupDestinationDetails, &out.BackupDestinationDetails + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backupconfig. +func (in *Backupconfig) DeepCopy() *Backupconfig { + if in == nil { + return nil + } + out := new(Backupconfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDB) DeepCopyInto(out *CDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDB. +func (in *CDB) DeepCopy() *CDB { + if in == nil { + return nil + } + out := new(CDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBAdminPassword) DeepCopyInto(out *CDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminPassword. +func (in *CDBAdminPassword) DeepCopy() *CDBAdminPassword { + if in == nil { + return nil + } + out := new(CDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBAdminUser) DeepCopyInto(out *CDBAdminUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminUser. +func (in *CDBAdminUser) DeepCopy() *CDBAdminUser { + if in == nil { + return nil + } + out := new(CDBAdminUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBList) DeepCopyInto(out *CDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBList. +func (in *CDBList) DeepCopy() *CDBList { + if in == nil { + return nil + } + out := new(CDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBPRIVKEY) DeepCopyInto(out *CDBPRIVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPRIVKEY. +func (in *CDBPRIVKEY) DeepCopy() *CDBPRIVKEY { + if in == nil { + return nil + } + out := new(CDBPRIVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBPUBKEY) DeepCopyInto(out *CDBPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPUBKEY. +func (in *CDBPUBKEY) DeepCopy() *CDBPUBKEY { + if in == nil { + return nil + } + out := new(CDBPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBSecret) DeepCopyInto(out *CDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSecret. +func (in *CDBSecret) DeepCopy() *CDBSecret { + if in == nil { + return nil + } + out := new(CDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CDBSpec) DeepCopyInto(out *CDBSpec) { + *out = *in + out.SysAdminPwd = in.SysAdminPwd + out.CDBAdminUser = in.CDBAdminUser + out.CDBAdminPwd = in.CDBAdminPwd + out.CDBTlsKey = in.CDBTlsKey + out.CDBTlsCrt = in.CDBTlsCrt + out.ORDSPwd = in.ORDSPwd + out.WebServerUser = in.WebServerUser + out.WebServerPwd = in.WebServerPwd + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.CDBPubKey = in.CDBPubKey + out.CDBPriKey = in.CDBPriKey +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSpec. +func (in *CDBSpec) DeepCopy() *CDBSpec { + if in == nil { + return nil + } + out := new(CDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBStatus) DeepCopyInto(out *CDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBStatus. +func (in *CDBStatus) DeepCopy() *CDBStatus { + if in == nil { + return nil + } + out := new(CDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBSysAdminPassword) DeepCopyInto(out *CDBSysAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSysAdminPassword. +func (in *CDBSysAdminPassword) DeepCopy() *CDBSysAdminPassword { + if in == nil { + return nil + } + out := new(CDBSysAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CDBTLSCRT) DeepCopyInto(out *CDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSCRT. +func (in *CDBTLSCRT) DeepCopy() *CDBTLSCRT { + if in == nil { + return nil + } + out := new(CDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBTLSKEY) DeepCopyInto(out *CDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSKEY. +func (in *CDBTLSKEY) DeepCopy() *CDBTLSKEY { + if in == nil { + return nil + } + out := new(CDBTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { + *out = *in + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make([]EnvironmentVariable, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvMatchLabels != nil { + in, out := &in.PvMatchLabels, &out.PvMatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ImagePulllPolicy != nil { + in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy + *out = new(corev1.PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new CatalogSpec. +func (in *CatalogSpec) DeepCopy() *CatalogSpec { + if in == nil { + return nil + } + out := new(CatalogSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStringProfile) DeepCopyInto(out *ConnectionStringProfile) { + *out = *in + if in.ConnectionStrings != nil { + in, out := &in.ConnectionStrings, &out.ConnectionStrings + *out = make([]ConnectionStringSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringProfile. +func (in *ConnectionStringProfile) DeepCopy() *ConnectionStringProfile { + if in == nil { + return nil + } + out := new(ConnectionStringProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStringSpec) DeepCopyInto(out *ConnectionStringSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringSpec. +func (in *ConnectionStringSpec) DeepCopy() *ConnectionStringSpec { + if in == nil { + return nil + } + out := new(ConnectionStringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataguardBroker) DeepCopyInto(out *DataguardBroker) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBroker. 
+func (in *DataguardBroker) DeepCopy() *DataguardBroker { + if in == nil { + return nil + } + out := new(DataguardBroker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataguardBroker) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataguardBrokerList) DeepCopyInto(out *DataguardBrokerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataguardBroker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerList. +func (in *DataguardBrokerList) DeepCopy() *DataguardBrokerList { + if in == nil { + return nil + } + out := new(DataguardBrokerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataguardBrokerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataguardBrokerSpec) DeepCopyInto(out *DataguardBrokerSpec) { + *out = *in + if in.StandbyDatabaseRefs != nil { + in, out := &in.StandbyDatabaseRefs, &out.StandbyDatabaseRefs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerSpec. +func (in *DataguardBrokerSpec) DeepCopy() *DataguardBrokerSpec { + if in == nil { + return nil + } + out := new(DataguardBrokerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataguardBrokerStatus) DeepCopyInto(out *DataguardBrokerStatus) { + *out = *in + if in.DatabasesInDataguardConfig != nil { + in, out := &in.DatabasesInDataguardConfig, &out.DatabasesInDataguardConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerStatus. +func (in *DataguardBrokerStatus) DeepCopy() *DataguardBrokerStatus { + if in == nil { + return nil + } + out := new(DataguardBrokerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DbCloneConfig) DeepCopyInto(out *DbCloneConfig) { + *out = *in + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneConfig. +func (in *DbCloneConfig) DeepCopy() *DbCloneConfig { + if in == nil { + return nil + } + out := new(DbCloneConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbCloneStatus) DeepCopyInto(out *DbCloneStatus) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneStatus. +func (in *DbCloneStatus) DeepCopy() *DbCloneStatus { + if in == nil { + return nil + } + out := new(DbCloneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbStatus) DeepCopyInto(out *DbStatus) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbStatus. +func (in *DbStatus) DeepCopy() *DbStatus { + if in == nil { + return nil + } + out := new(DbStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DbSystemDetails) DeepCopyInto(out *DbSystemDetails) { + *out = *in + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FaultDomains != nil { + in, out := &in.FaultDomains, &out.FaultDomains + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(int) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DbBackupConfig.DeepCopyInto(&out.DbBackupConfig) + out.KMSConfig = in.KMSConfig +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbSystemDetails. +func (in *DbSystemDetails) DeepCopy() *DbSystemDetails { + if in == nil { + return nil + } + out := new(DbSystemDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbWorkrequests) DeepCopyInto(out *DbWorkrequests) { + *out = *in + if in.OperationType != nil { + in, out := &in.OperationType, &out.OperationType + *out = new(string) + **out = **in + } + if in.OperationId != nil { + in, out := &in.OperationId, &out.OperationId + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbWorkrequests. +func (in *DbWorkrequests) DeepCopy() *DbWorkrequests { + if in == nil { + return nil + } + out := new(DbWorkrequests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DbcsSystem) DeepCopyInto(out *DbcsSystem) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystem. +func (in *DbcsSystem) DeepCopy() *DbcsSystem { + if in == nil { + return nil + } + out := new(DbcsSystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DbcsSystem) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbcsSystemList) DeepCopyInto(out *DbcsSystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DbcsSystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemList. +func (in *DbcsSystemList) DeepCopy() *DbcsSystemList { + if in == nil { + return nil + } + out := new(DbcsSystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DbcsSystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DbcsSystemSpec) DeepCopyInto(out *DbcsSystemSpec) { + *out = *in + in.DbSystem.DeepCopyInto(&out.DbSystem) + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.OCIConfigMap != nil { + in, out := &in.OCIConfigMap, &out.OCIConfigMap + *out = new(string) + **out = **in + } + if in.OCISecret != nil { + in, out := &in.OCISecret, &out.OCISecret + *out = new(string) + **out = **in + } + if in.DbClone != nil { + in, out := &in.DbClone, &out.DbClone + *out = new(DbCloneConfig) + (*in).DeepCopyInto(*out) + } + if in.PdbConfigs != nil { + in, out := &in.PdbConfigs, &out.PdbConfigs + *out = make([]PDBConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DbBackupId != nil { + in, out := &in.DbBackupId, &out.DbBackupId + *out = new(string) + **out = **in + } + if in.DatabaseId != nil { + in, out := &in.DatabaseId, &out.DatabaseId + *out = new(string) + **out = **in + } + out.KMSConfig = in.KMSConfig +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemSpec. +func (in *DbcsSystemSpec) DeepCopy() *DbcsSystemSpec { + if in == nil { + return nil + } + out := new(DbcsSystemSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DbcsSystemStatus) DeepCopyInto(out *DbcsSystemStatus) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.DataStoragePercentage != nil { + in, out := &in.DataStoragePercentage, &out.DataStoragePercentage + *out = new(int) + **out = **in + } + if in.DataStorageSizeInGBs != nil { + in, out := &in.DataStorageSizeInGBs, &out.DataStorageSizeInGBs + *out = new(int) + **out = **in + } + if in.RecoStorageSizeInGB != nil { + in, out := &in.RecoStorageSizeInGB, &out.RecoStorageSizeInGB + *out = new(int) + **out = **in + } + if in.Shape != nil { + in, out := &in.Shape, &out.Shape + *out = new(string) + **out = **in + } + if in.DbInfo != nil { + in, out := &in.DbInfo, &out.DbInfo + *out = make([]DbStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Network.DeepCopyInto(&out.Network) + if in.WorkRequests != nil { + in, out := &in.WorkRequests, &out.WorkRequests + *out = make([]DbWorkrequests, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.KMSDetailsStatus = in.KMSDetailsStatus + in.DbCloneStatus.DeepCopyInto(&out.DbCloneStatus) + if in.PdbDetailsStatus != nil { + in, out := &in.PdbDetailsStatus, &out.PdbDetailsStatus + *out = make([]PDBDetailsStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemStatus. +func (in *DbcsSystemStatus) DeepCopy() *DbcsSystemStatus { + if in == nil { + return nil + } + out := new(DbcsSystemStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentVariable) DeepCopyInto(out *EnvironmentVariable) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentVariable. 
+func (in *EnvironmentVariable) DeepCopy() *EnvironmentVariable { + if in == nil { + return nil + } + out := new(EnvironmentVariable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmServiceSpec) DeepCopyInto(out *GsmServiceSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmServiceSpec. +func (in *GsmServiceSpec) DeepCopy() *GsmServiceSpec { + if in == nil { + return nil + } + out := new(GsmServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmShardDetails) DeepCopyInto(out *GsmShardDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardDetails. +func (in *GsmShardDetails) DeepCopy() *GsmShardDetails { + if in == nil { + return nil + } + out := new(GsmShardDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmShardGroupSpec) DeepCopyInto(out *GsmShardGroupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardGroupSpec. +func (in *GsmShardGroupSpec) DeepCopy() *GsmShardGroupSpec { + if in == nil { + return nil + } + out := new(GsmShardGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmShardSpaceSpec) DeepCopyInto(out *GsmShardSpaceSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardSpaceSpec. 
+func (in *GsmShardSpaceSpec) DeepCopy() *GsmShardSpaceSpec { + if in == nil { + return nil + } + out := new(GsmShardSpaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmSpec) DeepCopyInto(out *GsmSpec) { + *out = *in + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make([]EnvironmentVariable, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvMatchLabels != nil { + in, out := &in.PvMatchLabels, &out.PvMatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ImagePulllPolicy != nil { + in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy + *out = new(corev1.PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmSpec. +func (in *GsmSpec) DeepCopy() *GsmSpec { + if in == nil { + return nil + } + out := new(GsmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GsmStatus) DeepCopyInto(out *GsmStatus) { + *out = *in + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatus. +func (in *GsmStatus) DeepCopy() *GsmStatus { + if in == nil { + return nil + } + out := new(GsmStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmStatusDetails) DeepCopyInto(out *GsmStatusDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatusDetails. +func (in *GsmStatusDetails) DeepCopy() *GsmStatusDetails { + if in == nil { + return nil + } + out := new(GsmStatusDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sAcdSpec) DeepCopyInto(out *K8sAcdSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAcdSpec. +func (in *K8sAcdSpec) DeepCopy() *K8sAcdSpec { + if in == nil { + return nil + } + out := new(K8sAcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K8sAdbBackupSpec) DeepCopyInto(out *K8sAdbBackupSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAdbBackupSpec. +func (in *K8sAdbBackupSpec) DeepCopy() *K8sAdbBackupSpec { + if in == nil { + return nil + } + out := new(K8sAdbBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sAdbSpec) DeepCopyInto(out *K8sAdbSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAdbSpec. +func (in *K8sAdbSpec) DeepCopy() *K8sAdbSpec { + if in == nil { + return nil + } + out := new(K8sAdbSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sSecretSpec) DeepCopyInto(out *K8sSecretSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sSecretSpec. +func (in *K8sSecretSpec) DeepCopy() *K8sSecretSpec { + if in == nil { + return nil + } + out := new(K8sSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfig. 
+func (in *KMSConfig) DeepCopy() *KMSConfig { + if in == nil { + return nil + } + out := new(KMSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSDetailsStatus) DeepCopyInto(out *KMSDetailsStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSDetailsStatus. +func (in *KMSDetailsStatus) DeepCopy() *KMSDetailsStatus { + if in == nil { + return nil + } + out := new(KMSDetailsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ORDSPassword) DeepCopyInto(out *ORDSPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ORDSPassword. +func (in *ORDSPassword) DeepCopy() *ORDSPassword { + if in == nil { + return nil + } + out := new(ORDSPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciAcdSpec) DeepCopyInto(out *OciAcdSpec) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAcdSpec. +func (in *OciAcdSpec) DeepCopy() *OciAcdSpec { + if in == nil { + return nil + } + out := new(OciAcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OciAdbSpec) DeepCopyInto(out *OciAdbSpec) { + *out = *in + if in.Ocid != nil { + in, out := &in.Ocid, &out.Ocid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAdbSpec. +func (in *OciAdbSpec) DeepCopy() *OciAdbSpec { + if in == nil { + return nil + } + out := new(OciAdbSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciConfigSpec) DeepCopyInto(out *OciConfigSpec) { + *out = *in + if in.ConfigMapName != nil { + in, out := &in.ConfigMapName, &out.ConfigMapName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciConfigSpec. +func (in *OciConfigSpec) DeepCopy() *OciConfigSpec { + if in == nil { + return nil + } + out := new(OciConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciSecretSpec) DeepCopyInto(out *OciSecretSpec) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciSecretSpec. +func (in *OciSecretSpec) DeepCopy() *OciSecretSpec { + if in == nil { + return nil + } + out := new(OciSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OracleRestDataService) DeepCopyInto(out *OracleRestDataService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataService. +func (in *OracleRestDataService) DeepCopy() *OracleRestDataService { + if in == nil { + return nil + } + out := new(OracleRestDataService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OracleRestDataService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceImage) DeepCopyInto(out *OracleRestDataServiceImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceImage. +func (in *OracleRestDataServiceImage) DeepCopy() *OracleRestDataServiceImage { + if in == nil { + return nil + } + out := new(OracleRestDataServiceImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceList) DeepCopyInto(out *OracleRestDataServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OracleRestDataService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceList. 
+func (in *OracleRestDataServiceList) DeepCopy() *OracleRestDataServiceList { + if in == nil { + return nil + } + out := new(OracleRestDataServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OracleRestDataServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServicePassword) DeepCopyInto(out *OracleRestDataServicePassword) { + *out = *in + if in.KeepSecret != nil { + in, out := &in.KeepSecret, &out.KeepSecret + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServicePassword. +func (in *OracleRestDataServicePassword) DeepCopy() *OracleRestDataServicePassword { + if in == nil { + return nil + } + out := new(OracleRestDataServicePassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServicePersistence) DeepCopyInto(out *OracleRestDataServicePersistence) { + *out = *in + if in.SetWritePermissions != nil { + in, out := &in.SetWritePermissions, &out.SetWritePermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServicePersistence. +func (in *OracleRestDataServicePersistence) DeepCopy() *OracleRestDataServicePersistence { + if in == nil { + return nil + } + out := new(OracleRestDataServicePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OracleRestDataServiceRestEnableSchemas) DeepCopyInto(out *OracleRestDataServiceRestEnableSchemas) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceRestEnableSchemas. +func (in *OracleRestDataServiceRestEnableSchemas) DeepCopy() *OracleRestDataServiceRestEnableSchemas { + if in == nil { + return nil + } + out := new(OracleRestDataServiceRestEnableSchemas) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceSpec) DeepCopyInto(out *OracleRestDataServiceSpec) { + *out = *in + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Image = in.Image + in.OrdsPassword.DeepCopyInto(&out.OrdsPassword) + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + if in.RestEnableSchemas != nil { + in, out := &in.RestEnableSchemas, &out.RestEnableSchemas + *out = make([]OracleRestDataServiceRestEnableSchemas, len(*in)) + copy(*out, *in) + } + in.Persistence.DeepCopyInto(&out.Persistence) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceSpec. +func (in *OracleRestDataServiceSpec) DeepCopy() *OracleRestDataServiceSpec { + if in == nil { + return nil + } + out := new(OracleRestDataServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OracleRestDataServiceStatus) DeepCopyInto(out *OracleRestDataServiceStatus) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceStatus. +func (in *OracleRestDataServiceStatus) DeepCopy() *OracleRestDataServiceStatus { + if in == nil { + return nil + } + out := new(OracleRestDataServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDB) DeepCopyInto(out *PDB) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -57,18 +1996,18 @@ func (in *AutonomousDatabase) DeepCopyInto(out *AutonomousDatabase) { out.Status = in.Status } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabase. -func (in *AutonomousDatabase) DeepCopy() *AutonomousDatabase { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDB. +func (in *PDB) DeepCopy() *PDB { if in == nil { return nil } - out := new(AutonomousDatabase) + out := new(PDB) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AutonomousDatabase) DeepCopyObject() runtime.Object { +func (in *PDB) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -76,77 +2015,100 @@ func (in *AutonomousDatabase) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails) { +func (in *PDBAdminName) DeepCopyInto(out *PDBAdminName) { *out = *in - if in.AutonomousDatabaseOCID != nil { - in, out := &in.AutonomousDatabaseOCID, &out.AutonomousDatabaseOCID - *out = new(string) - **out = **in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminName. +func (in *PDBAdminName) DeepCopy() *PDBAdminName { + if in == nil { + return nil } - if in.CompartmentOCID != nil { - in, out := &in.CompartmentOCID, &out.CompartmentOCID - *out = new(string) - **out = **in + out := new(PDBAdminName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBAdminPassword) DeepCopyInto(out *PDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminPassword. +func (in *PDBAdminPassword) DeepCopy() *PDBAdminPassword { + if in == nil { + return nil } - if in.DisplayName != nil { - in, out := &in.DisplayName, &out.DisplayName + out := new(PDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBConfig) DeepCopyInto(out *PDBConfig) { + *out = *in + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName *out = new(string) **out = **in } - if in.DbName != nil { - in, out := &in.DbName, &out.DbName + if in.PdbAdminPassword != nil { + in, out := &in.PdbAdminPassword, &out.PdbAdminPassword *out = new(string) **out = **in } - if in.IsDedicated != nil { - in, out := &in.IsDedicated, &out.IsDedicated - *out = new(bool) - **out = **in - } - if in.DbVersion != nil { - in, out := &in.DbVersion, &out.DbVersion + if in.TdeWalletPassword != nil { + in, out := &in.TdeWalletPassword, &out.TdeWalletPassword *out = new(string) **out = **in } - if in.DataStorageSizeInTBs != nil { - in, out := &in.DataStorageSizeInTBs, &out.DataStorageSizeInTBs - *out = new(int) + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked + *out = new(bool) **out = **in } - if in.CPUCoreCount != nil { - in, out := &in.CPUCoreCount, &out.CPUCoreCount - *out = new(int) - **out = **in + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } - in.AdminPassword.DeepCopyInto(&out.AdminPassword) - if in.IsAutoScalingEnabled != nil { - in, out := &in.IsAutoScalingEnabled, &out.IsAutoScalingEnabled + if in.IsDelete != nil { + in, out := &in.IsDelete, &out.IsDelete *out = new(bool) **out = **in } - if in.SubnetOCID != nil { - in, out := &in.SubnetOCID, &out.SubnetOCID + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId *out = new(string) **out = **in } - if in.NsgOCIDs != nil { - in, out := &in.NsgOCIDs, &out.NsgOCIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PrivateEndpoint != nil { - in, out := &in.PrivateEndpoint, &out.PrivateEndpoint - *out = new(string) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new PDBConfig. +func (in *PDBConfig) DeepCopy() *PDBConfig { + if in == nil { + return nil } - if in.PrivateEndpointLabel != nil { - in, out := &in.PrivateEndpointLabel, &out.PrivateEndpointLabel + out := new(PDBConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBConfigStatus) DeepCopyInto(out *PDBConfigStatus) { + *out = *in + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName *out = new(string) **out = **in } - if in.PrivateEndpointIP != nil { - in, out := &in.PrivateEndpointIP, &out.PrivateEndpointIP - *out = new(string) + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked + *out = new(bool) **out = **in } if in.FreeformTags != nil { @@ -156,263 +2118,239 @@ func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails (*out)[key] = val } } - in.Wallet.DeepCopyInto(&out.Wallet) + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseDetails. -func (in *AutonomousDatabaseDetails) DeepCopy() *AutonomousDatabaseDetails { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBConfigStatus. +func (in *PDBConfigStatus) DeepCopy() *PDBConfigStatus { if in == nil { return nil } - out := new(AutonomousDatabaseDetails) + out := new(PDBConfigStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AutonomousDatabaseList) DeepCopyInto(out *AutonomousDatabaseList) { +func (in *PDBDetailsStatus) DeepCopyInto(out *PDBDetailsStatus) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AutonomousDatabase, len(*in)) + if in.PDBConfigStatus != nil { + in, out := &in.PDBConfigStatus, &out.PDBConfigStatus + *out = make([]PDBConfigStatus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseList. -func (in *AutonomousDatabaseList) DeepCopy() *AutonomousDatabaseList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBDetailsStatus. +func (in *PDBDetailsStatus) DeepCopy() *PDBDetailsStatus { if in == nil { return nil } - out := new(AutonomousDatabaseList) + out := new(PDBDetailsStatus) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AutonomousDatabaseList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AutonomousDatabaseSpec) DeepCopyInto(out *AutonomousDatabaseSpec) { +func (in *PDBList) DeepCopyInto(out *PDBList) { *out = *in - in.Details.DeepCopyInto(&out.Details) - in.OCIConfig.DeepCopyInto(&out.OCIConfig) - if in.HardLink != nil { - in, out := &in.HardLink, &out.HardLink - *out = new(bool) - **out = **in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseSpec. -func (in *AutonomousDatabaseSpec) DeepCopy() *AutonomousDatabaseSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBList. +func (in *PDBList) DeepCopy() *PDBList { if in == nil { return nil } - out := new(AutonomousDatabaseSpec) + out := new(PDBList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AutonomousDatabaseStatus) DeepCopyInto(out *AutonomousDatabaseStatus) { +func (in *PDBPRIVKEY) DeepCopyInto(out *PDBPRIVKEY) { *out = *in + out.Secret = in.Secret } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseStatus. -func (in *AutonomousDatabaseStatus) DeepCopy() *AutonomousDatabaseStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPRIVKEY. 
+func (in *PDBPRIVKEY) DeepCopy() *PDBPRIVKEY { if in == nil { return nil } - out := new(AutonomousDatabaseStatus) + out := new(PDBPRIVKEY) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { +func (in *PDBPUBKEY) DeepCopyInto(out *PDBPUBKEY) { *out = *in - if in.EnvVars != nil { - in, out := &in.EnvVars, &out.EnvVars - *out = make([]EnvironmentVariable, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(corev1.ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PvAnnotations != nil { - in, out := &in.PvAnnotations, &out.PvAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PvMatchLabels != nil { - in, out := &in.PvMatchLabels, &out.PvMatchLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ImagePulllPolicy != nil { - in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy - *out = new(corev1.PullPolicy) - **out = **in - } + out.Secret = in.Secret } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSpec. -func (in *CatalogSpec) DeepCopy() *CatalogSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPUBKEY. +func (in *PDBPUBKEY) DeepCopy() *PDBPUBKEY { if in == nil { return nil } - out := new(CatalogSpec) + out := new(PDBPUBKEY) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *EnvironmentVariable) DeepCopyInto(out *EnvironmentVariable) { +func (in *PDBSecret) DeepCopyInto(out *PDBSecret) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentVariable. -func (in *EnvironmentVariable) DeepCopy() *EnvironmentVariable { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSecret. +func (in *PDBSecret) DeepCopy() *PDBSecret { if in == nil { return nil } - out := new(EnvironmentVariable) + out := new(PDBSecret) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GsmSpec) DeepCopyInto(out *GsmSpec) { +func (in *PDBSpec) DeepCopyInto(out *PDBSpec) { *out = *in - if in.EnvVars != nil { - in, out := &in.EnvVars, &out.EnvVars - *out = make([]EnvironmentVariable, len(*in)) - copy(*out, *in) + out.PDBTlsKey = in.PDBTlsKey + out.PDBTlsCrt = in.PDBTlsCrt + out.PDBTlsCat = in.PDBTlsCat + out.AdminName = in.AdminName + out.AdminPwd = in.AdminPwd + out.WebServerUsr = in.WebServerUsr + out.WebServerPwd = in.WebServerPwd + if in.ReuseTempFile != nil { + in, out := &in.ReuseTempFile, &out.ReuseTempFile + *out = new(bool) + **out = **in } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(corev1.ResourceRequirements) - (*in).DeepCopyInto(*out) + if in.UnlimitedStorage != nil { + in, out := &in.UnlimitedStorage, &out.UnlimitedStorage + *out = new(bool) + **out = **in } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } + if in.AsClone != nil { + in, out := &in.AsClone, &out.AsClone + *out = new(bool) + **out = **in } - if in.PvMatchLabels != nil { - in, out := &in.PvMatchLabels, &out.PvMatchLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } 
+ if in.TDEImport != nil { + in, out := &in.TDEImport, &out.TDEImport + *out = new(bool) + **out = **in + } + if in.TDEExport != nil { + in, out := &in.TDEExport, &out.TDEExport + *out = new(bool) + **out = **in } - if in.ImagePulllPolicy != nil { - in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy - *out = new(corev1.PullPolicy) + out.TDEPassword = in.TDEPassword + out.TDESecret = in.TDESecret + if in.GetScript != nil { + in, out := &in.GetScript, &out.GetScript + *out = new(bool) **out = **in } + out.PDBPubKey = in.PDBPubKey + out.PDBPriKey = in.PDBPriKey } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmSpec. -func (in *GsmSpec) DeepCopy() *GsmSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSpec. +func (in *PDBSpec) DeepCopy() *PDBSpec { if in == nil { return nil } - out := new(GsmSpec) + out := new(PDBSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GsmStatus) DeepCopyInto(out *GsmStatus) { +func (in *PDBStatus) DeepCopyInto(out *PDBStatus) { *out = *in - if in.Shards != nil { - in, out := &in.Shards, &out.Shards - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Details != nil { - in, out := &in.Details, &out.Details - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatus. -func (in *GsmStatus) DeepCopy() *GsmStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBStatus. 
+func (in *PDBStatus) DeepCopy() *PDBStatus { if in == nil { return nil } - out := new(GsmStatus) + out := new(PDBStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GsmStatusDetails) DeepCopyInto(out *GsmStatusDetails) { +func (in *PDBTLSCAT) DeepCopyInto(out *PDBTLSCAT) { *out = *in + out.Secret = in.Secret } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatusDetails. -func (in *GsmStatusDetails) DeepCopy() *GsmStatusDetails { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCAT. +func (in *PDBTLSCAT) DeepCopy() *PDBTLSCAT { if in == nil { return nil } - out := new(GsmStatusDetails) + out := new(PDBTLSCAT) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { +func (in *PDBTLSCRT) DeepCopyInto(out *PDBTLSCRT) { *out = *in - if in.ConfigMapName != nil { - in, out := &in.ConfigMapName, &out.ConfigMapName - *out = new(string) - **out = **in - } - if in.SecretName != nil { - in, out := &in.SecretName, &out.SecretName - *out = new(string) - **out = **in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCRT. +func (in *PDBTLSCRT) DeepCopy() *PDBTLSCRT { + if in == nil { + return nil } + out := new(PDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSKEY) DeepCopyInto(out *PDBTLSKEY) { + *out = *in + out.Secret = in.Secret } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIConfigSpec. 
-func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSKEY. +func (in *PDBTLSKEY) DeepCopy() *PDBTLSKEY { if in == nil { return nil } - out := new(OCIConfigSpec) + out := new(PDBTLSKEY) in.DeepCopyInto(out) return out } @@ -420,24 +2358,36 @@ func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PasswordSpec) DeepCopyInto(out *PasswordSpec) { *out = *in - if in.K8sSecretName != nil { - in, out := &in.K8sSecretName, &out.K8sSecretName - *out = new(string) - **out = **in + in.K8sSecret.DeepCopyInto(&out.K8sSecret) + in.OciSecret.DeepCopyInto(&out.OciSecret) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSpec. +func (in *PasswordSpec) DeepCopy() *PasswordSpec { + if in == nil { + return nil } - if in.OCISecretOCID != nil { - in, out := &in.OCISecretOCID, &out.OCISecretOCID + out := new(PasswordSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PitSpec) DeepCopyInto(out *PitSpec) { + *out = *in + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSpec. -func (in *PasswordSpec) DeepCopy() *PasswordSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PitSpec. +func (in *PitSpec) DeepCopy() *PitSpec { if in == nil { return nil } - out := new(PasswordSpec) + out := new(PitSpec) in.DeepCopyInto(out) return out } @@ -457,6 +2407,21 @@ func (in *PortMapping) DeepCopy() *PortMapping { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SecretDetails) DeepCopyInto(out *SecretDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretDetails. +func (in *SecretDetails) DeepCopy() *SecretDetails { + if in == nil { + return nil + } + out := new(SecretDetails) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ShardSpec) DeepCopyInto(out *ShardSpec) { *out = *in @@ -596,6 +2561,31 @@ func (in *ShardingDatabaseSpec) DeepCopyInto(out *ShardingDatabaseSpec) { *out = make([]PortMapping, len(*in)) copy(*out, *in) } + if in.GsmShardSpace != nil { + in, out := &in.GsmShardSpace, &out.GsmShardSpace + *out = make([]GsmShardSpaceSpec, len(*in)) + copy(*out, *in) + } + if in.GsmShardGroup != nil { + in, out := &in.GsmShardGroup, &out.GsmShardGroup + *out = make([]GsmShardGroupSpec, len(*in)) + copy(*out, *in) + } + if in.ShardRegion != nil { + in, out := &in.ShardRegion, &out.ShardRegion + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GsmService != nil { + in, out := &in.GsmService, &out.GsmService + *out = make([]GsmServiceSpec, len(*in)) + copy(*out, *in) + } + if in.DbSecret != nil { + in, out := &in.DbSecret, &out.DbSecret + *out = new(SecretDetails) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabaseSpec. @@ -675,6 +2665,11 @@ func (in *SingleInstanceDatabase) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SingleInstanceDatabaseAdminPassword) DeepCopyInto(out *SingleInstanceDatabaseAdminPassword) { *out = *in + if in.KeepSecret != nil { + in, out := &in.KeepSecret, &out.KeepSecret + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseAdminPassword. @@ -752,6 +2747,11 @@ func (in *SingleInstanceDatabaseList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SingleInstanceDatabasePersistence) DeepCopyInto(out *SingleInstanceDatabasePersistence) { *out = *in + if in.SetWritePermissions != nil { + in, out := &in.SetWritePermissions, &out.SetWritePermissions + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabasePersistence. @@ -764,9 +2764,76 @@ func (in *SingleInstanceDatabasePersistence) DeepCopy() *SingleInstanceDatabaseP return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseResource) DeepCopyInto(out *SingleInstanceDatabaseResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResource. +func (in *SingleInstanceDatabaseResource) DeepCopy() *SingleInstanceDatabaseResource { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseResources) DeepCopyInto(out *SingleInstanceDatabaseResources) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(SingleInstanceDatabaseResource) + **out = **in + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(SingleInstanceDatabaseResource) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResources. +func (in *SingleInstanceDatabaseResources) DeepCopy() *SingleInstanceDatabaseResources { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SingleInstanceDatabaseSpec) DeepCopyInto(out *SingleInstanceDatabaseSpec) { *out = *in + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.FlashBack != nil { + in, out := &in.FlashBack, &out.FlashBack + *out = new(bool) + **out = **in + } + if in.ArchiveLog != nil { + in, out := &in.ArchiveLog, &out.ArchiveLog + *out = new(bool) + **out = **in + } + if in.ForceLogging != nil { + in, out := &in.ForceLogging, &out.ForceLogging + *out = new(bool) + **out = **in + } + if in.TrueCacheServices != nil { + in, out := &in.TrueCacheServices, &out.TrueCacheServices + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string, len(*in)) @@ -774,10 +2841,15 @@ func (in *SingleInstanceDatabaseSpec) DeepCopyInto(out *SingleInstanceDatabaseSp (*out)[key] = val } } - out.AdminPassword = in.AdminPassword + in.AdminPassword.DeepCopyInto(&out.AdminPassword) out.Image = in.Image - out.Persistence = in.Persistence - 
out.InitParams = in.InitParams + in.Persistence.DeepCopyInto(&out.Persistence) + if in.InitParams != nil { + in, out := &in.InitParams, &out.InitParams + *out = new(SingleInstanceDatabaseInitParams) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseSpec. @@ -798,6 +2870,11 @@ func (in *SingleInstanceDatabaseStatus) DeepCopyInto(out *SingleInstanceDatabase *out = make([]string, len(*in)) copy(*out, *in) } + if in.DgBroker != nil { + in, out := &in.DgBroker, &out.DgBroker + *out = new(string) + **out = **in + } if in.StandbyDatabases != nil { in, out := &in.StandbyDatabases, &out.StandbyDatabases *out = make(map[string]string, len(*in)) @@ -813,7 +2890,7 @@ func (in *SingleInstanceDatabaseStatus) DeepCopyInto(out *SingleInstanceDatabase } } out.InitParams = in.InitParams - out.Persistence = in.Persistence + in.Persistence.DeepCopyInto(&out.Persistence) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseStatus. @@ -826,6 +2903,107 @@ func (in *SingleInstanceDatabaseStatus) DeepCopy() *SingleInstanceDatabaseStatus return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSpec) DeepCopyInto(out *SourceSpec) { + *out = *in + in.K8sAdbBackup.DeepCopyInto(&out.K8sAdbBackup) + in.PointInTime.DeepCopyInto(&out.PointInTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec. +func (in *SourceSpec) DeepCopy() *SourceSpec { + if in == nil { + return nil + } + out := new(SourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TDEPwd) DeepCopyInto(out *TDEPwd) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDEPwd. +func (in *TDEPwd) DeepCopy() *TDEPwd { + if in == nil { + return nil + } + out := new(TDEPwd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TDESecret) DeepCopyInto(out *TDESecret) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDESecret. +func (in *TDESecret) DeepCopy() *TDESecret { + if in == nil { + return nil + } + out := new(TDESecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { + *out = *in + in.K8sAdb.DeepCopyInto(&out.K8sAdb) + in.OciAdb.DeepCopyInto(&out.OciAdb) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSpec. +func (in *TargetSpec) DeepCopy() *TargetSpec { + if in == nil { + return nil + } + out := new(TargetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VmNetworkDetails) DeepCopyInto(out *VmNetworkDetails) { + *out = *in + if in.VcnName != nil { + in, out := &in.VcnName, &out.VcnName + *out = new(string) + **out = **in + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.ScanDnsName != nil { + in, out := &in.ScanDnsName, &out.ScanDnsName + *out = new(string) + **out = **in + } + if in.ListenerPort != nil { + in, out := &in.ListenerPort, &out.ListenerPort + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VmNetworkDetails. +func (in *VmNetworkDetails) DeepCopy() *VmNetworkDetails { + if in == nil { + return nil + } + out := new(VmNetworkDetails) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WalletSpec) DeepCopyInto(out *WalletSpec) { *out = *in @@ -846,3 +3024,67 @@ func (in *WalletSpec) DeepCopy() *WalletSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerPassword) DeepCopyInto(out *WebServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPassword. +func (in *WebServerPassword) DeepCopy() *WebServerPassword { + if in == nil { + return nil + } + out := new(WebServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerPasswordPDB) DeepCopyInto(out *WebServerPasswordPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPasswordPDB. 
+func (in *WebServerPasswordPDB) DeepCopy() *WebServerPasswordPDB { + if in == nil { + return nil + } + out := new(WebServerPasswordPDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUser) DeepCopyInto(out *WebServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUser. +func (in *WebServerUser) DeepCopy() *WebServerUser { + if in == nil { + return nil + } + out := new(WebServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUserPDB) DeepCopyInto(out *WebServerUserPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUserPDB. +func (in *WebServerUserPDB) DeepCopy() *WebServerUserPDB { + if in == nil { + return nil + } + out := new(WebServerUserPDB) + in.DeepCopyInto(out) + return out +} diff --git a/apis/database/v4/adbfamily_common_spec.go b/apis/database/v4/adbfamily_common_spec.go new file mode 100644 index 00000000..87434852 --- /dev/null +++ b/apis/database/v4/adbfamily_common_spec.go @@ -0,0 +1,67 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +// LastSuccessfulSpec is an annotation key which maps to the value of last successful spec +const LastSuccessfulSpec string = "lastSuccessfulSpec" + +/************************ +* OCI config +************************/ +type OciConfigSpec struct { + ConfigMapName *string `json:"configMapName,omitempty"` + SecretName *string `json:"secretName,omitempty"` +} + +/************************ +* ADB spec +************************/ +type K8sAdbSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciAdbSpec struct { + OCID *string `json:"ocid,omitempty"` +} + +// TargetSpec defines the spec of the target for backup/restore runs. +type TargetSpec struct { + K8sAdb K8sAdbSpec `json:"k8sADB,omitempty"` + OciAdb OciAdbSpec `json:"ociADB,omitempty"` +} diff --git a/apis/database/v4/adbfamily_utils.go b/apis/database/v4/adbfamily_utils.go new file mode 100644 index 00000000..380dab35 --- /dev/null +++ b/apis/database/v4/adbfamily_utils.go @@ -0,0 +1,287 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "errors" + "reflect" + "time" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" +) + +// This file contains the util functions that are shared by specs in both +// apis/database/v1alpha1 and apis/database/v4. + +/************************** +* Remove Unchanged Fields +**************************/ + +// removeUnchangedFields removes the unchanged fields in the struct and returns if the struct is changed. +// lastSpec should be a dereferenced struct that is the last successful spec, e.g. AutonomousDatabaseSpec. +// curSpec should be a pointer pointing to the struct that is being processed, e.g., *AutonomousDatabaseSpec. +func RemoveUnchangedFields(lastSpec interface{}, curSpec interface{}) (bool, error) { + if reflect.ValueOf(lastSpec).Kind() != reflect.Struct { + return false, errors.New("lastSpec should be a struct") + } + + if reflect.ValueOf(curSpec).Kind() != reflect.Ptr || reflect.ValueOf(curSpec).Elem().Kind() != reflect.Struct { + return false, errors.New("curSpec should be a struct pointer") + } + + if reflect.ValueOf(lastSpec).Type() != reflect.ValueOf(curSpec).Elem().Type() { + return false, errors.New("the referenced type of curSpec should be the same as the type of lastSpec") + } + + return traverse(lastSpec, curSpec), nil +} + +// Traverse and compare each field in the lastSpec and the curSpec. +// If unchanged, set the field in curSpec to a zero value. +// lastSpec should be a dereferenced struct that is the last successful spec, e.g. AutonomousDatabaseSpec. +// curSpec should be a pointer pointing to the struct that is being processed, e.g., *AutonomousDatabaseSpec.
+func traverse(lastSpec interface{}, curSpec interface{}) bool { + var changed bool = false + + fields := reflect.VisibleFields(reflect.TypeOf(lastSpec)) + + lastSpecValue := reflect.ValueOf(lastSpec) + curSpecValue := reflect.ValueOf(curSpec).Elem() // deref the struct + + for _, field := range fields { + lastField := lastSpecValue.FieldByName(field.Name) + curField := curSpecValue.FieldByName(field.Name) + + // call traverse() if the current field is a struct + if field.Type.Kind() == reflect.Struct { + childrenChanged := traverse(lastField.Interface(), curField.Addr().Interface()) + if childrenChanged && !changed { + changed = true + } + } else { + fieldChanged := hasChanged(lastField, curField) + + // if fieldChanged { + // if curField.Kind() == reflect.Ptr { + // fmt.Printf("== field %s changed\n", field.Name) + // if lastField.IsZero() { + // fmt.Printf("=== lastField is nil\n") + // } else { + // fmt.Printf("=== lastField = %v\n", lastField.Elem().Interface()) + // } + // if curField.IsZero() { + // fmt.Printf("===== curField is nil\n") + // } else { + // fmt.Printf("===== curField = %v\n", curField.Elem().Interface()) + // } + // } else { + // fmt.Printf("=== lastField = %v\n", lastField.Interface()) + // fmt.Printf("===== curField = %v\n", curField.Interface()) + // } + // } + + if fieldChanged && !changed { + changed = true + } + + // Set the field to zero value if unchanged + if !fieldChanged { + curField.Set(reflect.Zero(curField.Type())) + } + } + } + + return changed +} + +// 1. If the current field is with a zero value, then the field is unchanged. +// 2. If the current field is NOT with a zero value, then we want to compare it with the last field.
+// In this case if the last field is with a zero value, then the field is changed +func hasChanged(lastField reflect.Value, curField reflect.Value) bool { + zero := reflect.Zero(lastField.Type()).Interface() + lastFieldIsZero := reflect.DeepEqual(lastField.Interface(), zero) + curFieldIsZero := reflect.DeepEqual(curField.Interface(), zero) + + if curFieldIsZero { + return false + } else if !lastFieldIsZero { + var lastIntrf interface{} + var curIntrf interface{} + + if curField.Kind() == reflect.Ptr { + lastIntrf = lastField.Elem().Interface() + curIntrf = curField.Elem().Interface() + } else { + lastIntrf = lastField.Interface() + curIntrf = curField.Interface() + } + + return !reflect.DeepEqual(lastIntrf, curIntrf) + } + + return true +} + +/************************ +* SDKTime format +************************/ + +// Follow the format of the display time +const displayFormat = "2006-01-02 15:04:05 MST" + +func FormatSDKTime(sdkTime *common.SDKTime) string { + if sdkTime == nil { + return "" + } + + time := sdkTime.Time + return time.Format(displayFormat) +} + +func ParseDisplayTime(val string) (*common.SDKTime, error) { + parsedTime, err := time.Parse(displayFormat, val) + if err != nil { + return nil, err + } + sdkTime := common.SDKTime{Time: parsedTime} + return &sdkTime, nil +} + +/************************ +* LifecycleState check +************************/ +func IsAdbIntermediateState(state database.AutonomousDatabaseLifecycleStateEnum) bool { + if state == database.AutonomousDatabaseLifecycleStateProvisioning || + state == database.AutonomousDatabaseLifecycleStateUpdating || + state == database.AutonomousDatabaseLifecycleStateScaleInProgress || + state == database.AutonomousDatabaseLifecycleStateStarting || + state == database.AutonomousDatabaseLifecycleStateStopping || + state == database.AutonomousDatabaseLifecycleStateTerminating || + state == database.AutonomousDatabaseLifecycleStateRestoreInProgress || + state == 
database.AutonomousDatabaseLifecycleStateBackupInProgress || + state == database.AutonomousDatabaseLifecycleStateMaintenanceInProgress || + state == database.AutonomousDatabaseLifecycleStateRestarting || + state == database.AutonomousDatabaseLifecycleStateRecreating || + state == database.AutonomousDatabaseLifecycleStateRoleChangeInProgress || + state == database.AutonomousDatabaseLifecycleStateUpgrading { + return true + } + return false +} + +func CanBeTerminated(state database.AutonomousDatabaseLifecycleStateEnum) bool { + if state == database.AutonomousDatabaseLifecycleStateProvisioning || + state == database.AutonomousDatabaseLifecycleStateAvailable || + state == database.AutonomousDatabaseLifecycleStateStopped || + state == database.AutonomousDatabaseLifecycleStateUnavailable || + state == database.AutonomousDatabaseLifecycleStateRestoreInProgress || + state == database.AutonomousDatabaseLifecycleStateRestoreFailed || + state == database.AutonomousDatabaseLifecycleStateBackupInProgress || + state == database.AutonomousDatabaseLifecycleStateScaleInProgress || + state == database.AutonomousDatabaseLifecycleStateAvailableNeedsAttention || + state == database.AutonomousDatabaseLifecycleStateUpdating || + state == database.AutonomousDatabaseLifecycleStateMaintenanceInProgress || + state == database.AutonomousDatabaseLifecycleStateRoleChangeInProgress || + state == database.AutonomousDatabaseLifecycleStateUpgrading { + return true + } + return false +} + +// NextADBStableState returns the next stable state if it's an intermediate state. +// Otherwise returns the same state. 
+func NextADBStableState(state database.AutonomousDatabaseLifecycleStateEnum) database.AutonomousDatabaseLifecycleStateEnum { + if state == database.AutonomousDatabaseLifecycleStateProvisioning || + state == database.AutonomousDatabaseLifecycleStateStarting || + state == database.AutonomousDatabaseLifecycleStateRestoreInProgress || + state == database.AutonomousDatabaseLifecycleStateBackupInProgress || + state == database.AutonomousDatabaseLifecycleStateScaleInProgress || + state == database.AutonomousDatabaseLifecycleStateUpdating || + state == database.AutonomousDatabaseLifecycleStateMaintenanceInProgress || + state == database.AutonomousDatabaseLifecycleStateRestarting || + state == database.AutonomousDatabaseLifecycleStateRecreating || + state == database.AutonomousDatabaseLifecycleStateRoleChangeInProgress || + state == database.AutonomousDatabaseLifecycleStateUpgrading { + + return database.AutonomousDatabaseLifecycleStateAvailable + } + + if state == database.AutonomousDatabaseLifecycleStateStopping { + return database.AutonomousDatabaseLifecycleStateStopped + } + + if state == database.AutonomousDatabaseLifecycleStateTerminating { + return database.AutonomousDatabaseLifecycleStateTerminated + } + + return state +} + +func IsBackupIntermediateState(state database.AutonomousDatabaseBackupLifecycleStateEnum) bool { + if state == database.AutonomousDatabaseBackupLifecycleStateCreating || + state == database.AutonomousDatabaseBackupLifecycleStateDeleting { + return true + } + return false +} + +func IsRestoreIntermediateState(state workrequests.WorkRequestStatusEnum) bool { + if state == workrequests.WorkRequestStatusAccepted || + state == workrequests.WorkRequestStatusInProgress || + state == workrequests.WorkRequestStatusCanceling { + return true + } + return false +} + +func IsACDIntermediateState(state database.AutonomousContainerDatabaseLifecycleStateEnum) bool { + if state == database.AutonomousContainerDatabaseLifecycleStateProvisioning || + state == 
database.AutonomousContainerDatabaseLifecycleStateUpdating || + state == database.AutonomousContainerDatabaseLifecycleStateTerminating || + state == database.AutonomousContainerDatabaseLifecycleStateBackupInProgress || + state == database.AutonomousContainerDatabaseLifecycleStateRestoring || + state == database.AutonomousContainerDatabaseLifecycleStateRestarting || + state == database.AutonomousContainerDatabaseLifecycleStateMaintenanceInProgress || + state == database.AutonomousContainerDatabaseLifecycleStateRoleChangeInProgress { + return true + } + return false +} diff --git a/apis/database/v4/autonomouscontainerdatabase_types.go b/apis/database/v4/autonomouscontainerdatabase_types.go new file mode 100644 index 00000000..be9cc615 --- /dev/null +++ b/apis/database/v4/autonomouscontainerdatabase_types.go @@ -0,0 +1,226 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to 
sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "encoding/json" + "reflect" + + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// name of our custom finalizer +const ACDFinalizer = "database.oracle.com/acd-finalizer" + +type AcdActionEnum string + +const ( + AcdActionBlank AcdActionEnum = "" + AcdActionRestart AcdActionEnum = "RESTART" + AcdActionTerminate AcdActionEnum = "TERMINATE" +) + +func GetAcdActionEnumFromString(val string) (AcdActionEnum, bool) { + var mappingAcdActionEnum = map[string]AcdActionEnum{ + "RESTART": AcdActionRestart, + "TERMINATE": AcdActionTerminate, + "": AcdActionBlank, + } + + enum, ok := mappingAcdActionEnum[val] + return enum, ok +} + +// AutonomousContainerDatabaseSpec defines the desired state of AutonomousContainerDatabase +type AutonomousContainerDatabaseSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + AutonomousContainerDatabaseOCID *string `json:"autonomousContainerDatabaseOCID,omitempty"` + CompartmentOCID *string `json:"compartmentOCID,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + AutonomousExadataVMClusterOCID *string `json:"autonomousExadataVMClusterOCID,omitempty"` + // +kubebuilder:validation:Enum:="RELEASE_UPDATES";"RELEASE_UPDATE_REVISIONS" + PatchModel database.AutonomousContainerDatabasePatchModelEnum `json:"patchModel,omitempty"` + // +kubebuilder:validation:Enum:="SYNC";"RESTART";"TERMINATE" + Action AcdActionEnum `json:"action,omitempty"` + FreeformTags map[string]string `json:"freeformTags,omitempty"` + + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` + // +kubebuilder:default:=false + HardLink *bool `json:"hardLink,omitempty"` +} + +// AutonomousContainerDatabaseStatus defines the observed state of AutonomousContainerDatabase +type AutonomousContainerDatabaseStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + LifecycleState database.AutonomousContainerDatabaseLifecycleStateEnum `json:"lifecycleState"` + 
TimeCreated string `json:"timeCreated,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="acd";"acds" +// +kubebuilder:printcolumn:JSONPath=".spec.displayName",name="DisplayName",type=string +// +kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string +// +kubebuilder:printcolumn:JSONPath=".status.timeCreated",name="Created",type=string +// +kubebuilder:storageversion + +// AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases API +type AutonomousContainerDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousContainerDatabaseSpec `json:"spec,omitempty"` + Status AutonomousContainerDatabaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousContainerDatabaseList contains a list of AutonomousContainerDatabase +type AutonomousContainerDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousContainerDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousContainerDatabase{}, &AutonomousContainerDatabaseList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousContainerDatabase) Hub() {} + +// GetLastSuccessfulSpec returns spec from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulSpec.
+func (acd *AutonomousContainerDatabase) GetLastSuccessfulSpec() (*AutonomousContainerDatabaseSpec, error) { + val, ok := acd.GetAnnotations()[LastSuccessfulSpec] + if !ok { + return nil, nil + } + + specBytes := []byte(val) + sucSpec := AutonomousContainerDatabaseSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + return nil, err + } + + return &sucSpec, nil +} + +func (acd *AutonomousContainerDatabase) UpdateLastSuccessfulSpec() error { + specBytes, err := json.Marshal(acd.Spec) + if err != nil { + return err + } + + anns := acd.GetAnnotations() + + if anns == nil { + anns = map[string]string{ + LastSuccessfulSpec: string(specBytes), + } + } else { + anns[LastSuccessfulSpec] = string(specBytes) + } + + acd.SetAnnotations(anns) + + return nil +} + +// UpdateStatusFromOCIACD updates the status subresource +func (acd *AutonomousContainerDatabase) UpdateStatusFromOCIACD(ociObj database.AutonomousContainerDatabase) { + acd.Status.LifecycleState = ociObj.LifecycleState + acd.Status.TimeCreated = FormatSDKTime(ociObj.TimeCreated) +} + +// UpdateFromOCIACD updates the attributes using database.AutonomousContainerDatabase object +func (acd *AutonomousContainerDatabase) UpdateFromOCIACD(ociObj database.AutonomousContainerDatabase) (specChanged bool) { + oldACD := acd.DeepCopy() + + /*********************************** + * update the spec + ***********************************/ + acd.Spec.Action = AcdActionBlank + acd.Spec.AutonomousContainerDatabaseOCID = ociObj.Id + acd.Spec.CompartmentOCID = ociObj.CompartmentId + acd.Spec.DisplayName = ociObj.DisplayName + acd.Spec.AutonomousExadataVMClusterOCID = ociObj.CloudAutonomousVmClusterId + acd.Spec.PatchModel = ociObj.PatchModel + + // special case: an empty map will be nil after unmarshalling while the OCI always returns an empty map.
+ if len(ociObj.FreeformTags) != 0 { + acd.Spec.FreeformTags = ociObj.FreeformTags + } else { + acd.Spec.FreeformTags = nil + } + + /*********************************** + * update the status subresource + ***********************************/ + acd.UpdateStatusFromOCIACD(ociObj) + + return !reflect.DeepEqual(oldACD.Spec, acd.Spec) +} + +// RemoveUnchangedSpec removes the unchanged fields in spec, and returns if the spec has been changed. +func (acd *AutonomousContainerDatabase) RemoveUnchangedSpec(prevSpec AutonomousContainerDatabaseSpec) (bool, error) { + changed, err := RemoveUnchangedFields(prevSpec, &acd.Spec) + if err != nil { + return changed, err + } + + return changed, nil +} + +// A helper function which is useful for debugging. The function prints out a structural JSON format. +func (acd *AutonomousContainerDatabase) String() (string, error) { + out, err := json.MarshalIndent(acd, "", " ") + if err != nil { + return "", err + } + return string(out), nil +} diff --git a/apis/database/v4/autonomouscontainerdatabase_webhook.go b/apis/database/v4/autonomouscontainerdatabase_webhook.go new file mode 100644 index 00000000..9fcb9d8b --- /dev/null +++ b/apis/database/v4/autonomouscontainerdatabase_webhook.go @@ -0,0 +1,110 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomouscontainerdatabaselog = logf.Log.WithName("autonomouscontainerdatabase-resource") + +func (r *AutonomousContainerDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomouscontainerdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomouscontainerdatabases,versions=v4,name=vautonomouscontainerdatabasev4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousContainerDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateCreate() (admission.Warnings, error) { + autonomouscontainerdatabaselog.Info("validate create", "name", r.Name) + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + var oldACD *AutonomousContainerDatabase = old.(*AutonomousContainerDatabase) + + autonomouscontainerdatabaselog.Info("validate update", "name", r.Name) + + // skip the update of adding ADB OCID or binding + if oldACD.Status.LifecycleState == "" { + return nil, nil + } + + // cannot update when the old state is in intermediate state, except for the terminate operation + var copiedSpec *AutonomousContainerDatabaseSpec = 
r.Spec.DeepCopy() + changed, err := RemoveUnchangedFields(oldACD.Spec, copiedSpec) + if err != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec"), err.Error())) + } + if IsACDIntermediateState(oldACD.Status.LifecycleState) && changed { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec"), + "cannot change the spec when the lifecycleState is in an intermediate state")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousContainerDatabase"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateDelete() (admission.Warnings, error) { + autonomouscontainerdatabaselog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/autonomousdatabase_types.go b/apis/database/v4/autonomousdatabase_types.go new file mode 100644 index 00000000..628dd882 --- /dev/null +++ b/apis/database/v4/autonomousdatabase_types.go @@ -0,0 +1,393 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "encoding/json" + "reflect" + + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// AutonomousDatabaseSpec defines the desired state of AutonomousDatabase +// Important: Run "make" to regenerate code after modifying this file +type AutonomousDatabaseSpec struct { + // +kubebuilder:validation:Enum:="";Create;Sync;Update;Stop;Start;Terminate;Clone + Action string `json:"action"` + Details AutonomousDatabaseDetails `json:"details,omitempty"` + Clone AutonomousDatabaseClone `json:"clone,omitempty"` + Wallet WalletSpec `json:"wallet,omitempty"` + OciConfig OciConfigSpec `json:"ociConfig,omitempty"` + // +kubebuilder:default:=false + HardLink *bool `json:"hardLink,omitempty"` +} + +type AutonomousDatabaseDetails struct { + AutonomousDatabaseBase `json:",inline"` + Id *string `json:"id,omitempty"` +} + +type AutonomousDatabaseClone struct { + AutonomousDatabaseBase `json:",inline"` + // +kubebuilder:validation:Enum:="FULL";"METADATA" + CloneType database.CreateAutonomousDatabaseCloneDetailsCloneTypeEnum `json:"cloneType,omitempty"` +} + +// AutonomousDatabaseBase defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase +type AutonomousDatabaseBase struct { + CompartmentId *string `json:"compartmentId,omitempty"` + AutonomousContainerDatabase AcdSpec `json:"autonomousContainerDatabase,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + DbName *string `json:"dbName,omitempty"` + // +kubebuilder:validation:Enum:="OLTP";"DW";"AJD";"APEX" + DbWorkload database.AutonomousDatabaseDbWorkloadEnum `json:"dbWorkload,omitempty"` + // +kubebuilder:validation:Enum:="LICENSE_INCLUDED";"BRING_YOUR_OWN_LICENSE" + LicenseModel database.AutonomousDatabaseLicenseModelEnum 
`json:"licenseModel,omitempty"` + DbVersion *string `json:"dbVersion,omitempty"` + DataStorageSizeInTBs *int `json:"dataStorageSizeInTBs,omitempty"` + CpuCoreCount *int `json:"cpuCoreCount,omitempty"` + // +kubebuilder:validation:Enum:="ECPU";"OCPU" + ComputeModel database.AutonomousDatabaseComputeModelEnum `json:"computeModel,omitempty"` + ComputeCount *float32 `json:"computeCount,omitempty"` + OcpuCount *float32 `json:"ocpuCount,omitempty"` + AdminPassword PasswordSpec `json:"adminPassword,omitempty"` + IsAutoScalingEnabled *bool `json:"isAutoScalingEnabled,omitempty"` + IsDedicated *bool `json:"isDedicated,omitempty"` + IsFreeTier *bool `json:"isFreeTier,omitempty"` + + // NetworkAccess + IsAccessControlEnabled *bool `json:"isAccessControlEnabled,omitempty"` + WhitelistedIps []string `json:"whitelistedIps,omitempty"` + SubnetId *string `json:"subnetId,omitempty"` + NsgIds []string `json:"nsgIds,omitempty"` + PrivateEndpointLabel *string `json:"privateEndpointLabel,omitempty"` + IsMtlsConnectionRequired *bool `json:"isMtlsConnectionRequired,omitempty"` + + FreeformTags map[string]string `json:"freeformTags,omitempty"` +} + +/************************ +* ACD specs +************************/ +type K8sAcdSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciAcdSpec struct { + Id *string `json:"id,omitempty"` +} + +// AcdSpec defines the spec of the target for backup/restore runs. 
+// The name could be the name of an AutonomousDatabase or an AutonomousDatabaseBackup +type AcdSpec struct { + K8sAcd K8sAcdSpec `json:"k8sAcd,omitempty"` + OciAcd OciAcdSpec `json:"ociAcd,omitempty"` +} + +/************************ +* Secret specs +************************/ +type K8sSecretSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciSecretSpec struct { + Id *string `json:"id,omitempty"` +} + +type PasswordSpec struct { + K8sSecret K8sSecretSpec `json:"k8sSecret,omitempty"` + OciSecret OciSecretSpec `json:"ociSecret,omitempty"` +} + +type WalletSpec struct { + Name *string `json:"name,omitempty"` + Password PasswordSpec `json:"password,omitempty"` +} + +// AutonomousDatabaseStatus defines the observed state of AutonomousDatabase +type AutonomousDatabaseStatus struct { + // Lifecycle State of the ADB + LifecycleState database.AutonomousDatabaseLifecycleStateEnum `json:"lifecycleState,omitempty"` + // Creation time of the ADB + TimeCreated string `json:"timeCreated,omitempty"` + // Expiring date of the instance wallet + WalletExpiringDate string `json:"walletExpiringDate,omitempty"` + // Connection Strings of the ADB + AllConnectionStrings []ConnectionStringProfile `json:"allConnectionStrings,omitempty"` + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type TLSAuthenticationEnum string + +const ( + tlsAuthenticationTLS TLSAuthenticationEnum = "TLS" + tlsAuthenticationMTLS TLSAuthenticationEnum = "Mutual TLS" +) + +func GetTLSAuthenticationEnumFromString(val string) (TLSAuthenticationEnum, bool) { + var mappingTLSAuthenticationEnum = map[string]TLSAuthenticationEnum{ + "TLS": tlsAuthenticationTLS, + "Mutual TLS": tlsAuthenticationMTLS, + } + + enum, ok := mappingTLSAuthenticationEnum[val] + return enum, ok +} + +type ConnectionStringProfile struct { + TLSAuthentication TLSAuthenticationEnum `json:"tlsAuthentication,omitempty"` + 
ConnectionStrings []ConnectionStringSpec `json:"connectionStrings"` +} + +type ConnectionStringSpec struct { + TNSName string `json:"tnsName,omitempty"` + ConnectionString string `json:"connectionString,omitempty"` +} + +// AutonomousDatabase is the Schema for the autonomousdatabases API +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName="adb";"adbs" +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.details.displayName",name="Display Name",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.dbName",name="Db Name",type=string +// +kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.isDedicated",name="Dedicated",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.cpuCoreCount",name="OCPUs",type=integer +// +kubebuilder:printcolumn:JSONPath=".spec.details.dataStorageSizeInTBs",name="Storage (TB)",type=integer +// +kubebuilder:printcolumn:JSONPath=".spec.details.dbWorkload",name="Workload Type",type=string +// +kubebuilder:printcolumn:JSONPath=".status.timeCreated",name="Created",type=string +// +kubebuilder:storageversion +type AutonomousDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousDatabaseSpec `json:"spec,omitempty"` + Status AutonomousDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AutonomousDatabaseList contains a list of AutonomousDatabase +type AutonomousDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabase{}, &AutonomousDatabaseList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousDatabase) Hub() {} + +// UpdateStatusFromOCIADB updates the status subresource +func (adb 
*AutonomousDatabase) UpdateStatusFromOciAdb(ociObj database.AutonomousDatabase) { + adb.Status.LifecycleState = ociObj.LifecycleState + adb.Status.TimeCreated = FormatSDKTime(ociObj.TimeCreated) + + if *ociObj.IsDedicated { + conns := make([]ConnectionStringSpec, len(ociObj.ConnectionStrings.AllConnectionStrings)) + for key, val := range ociObj.ConnectionStrings.AllConnectionStrings { + conns = append(conns, ConnectionStringSpec{TNSName: key, ConnectionString: val}) + } + + adb.Status.AllConnectionStrings = []ConnectionStringProfile{ + {ConnectionStrings: conns}, + } + } else { + var mTLSConns []ConnectionStringSpec + var tlsConns []ConnectionStringSpec + + var conns []ConnectionStringProfile + + for _, profile := range ociObj.ConnectionStrings.Profiles { + if profile.TlsAuthentication == database.DatabaseConnectionStringProfileTlsAuthenticationMutual { + mTLSConns = append(mTLSConns, ConnectionStringSpec{TNSName: *profile.DisplayName, ConnectionString: *profile.Value}) + } else { + tlsConns = append(tlsConns, ConnectionStringSpec{TNSName: *profile.DisplayName, ConnectionString: *profile.Value}) + } + } + + if len(mTLSConns) > 0 { + conns = append(conns, ConnectionStringProfile{ + TLSAuthentication: tlsAuthenticationMTLS, + ConnectionStrings: mTLSConns, + }) + } + + if len(tlsConns) > 0 { + conns = append(conns, ConnectionStringProfile{ + TLSAuthentication: tlsAuthenticationTLS, + ConnectionStrings: tlsConns, + }) + } + + adb.Status.AllConnectionStrings = conns + } +} + +// UpdateFromOciAdb updates the attributes using database.AutonomousDatabase object +func (adb *AutonomousDatabase) UpdateFromOciAdb(ociObj database.AutonomousDatabase, overwrite bool) (specChanged bool) { + oldADB := adb.DeepCopy() + + /*********************************** + * update the spec + ***********************************/ + if overwrite || adb.Spec.Details.Id == nil { + adb.Spec.Details.Id = ociObj.Id + } + if overwrite || adb.Spec.Details.CompartmentId == nil { + 
adb.Spec.Details.CompartmentId = ociObj.CompartmentId + } + if overwrite || adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id == nil { + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = ociObj.AutonomousContainerDatabaseId + } + if overwrite || adb.Spec.Details.DisplayName == nil { + adb.Spec.Details.DisplayName = ociObj.DisplayName + } + if overwrite || adb.Spec.Details.DbName == nil { + adb.Spec.Details.DbName = ociObj.DbName + } + if overwrite || adb.Spec.Details.DbWorkload == "" { + adb.Spec.Details.DbWorkload = ociObj.DbWorkload + } + if overwrite || adb.Spec.Details.LicenseModel == "" { + adb.Spec.Details.LicenseModel = ociObj.LicenseModel + } + if overwrite || adb.Spec.Details.DbVersion == nil { + adb.Spec.Details.DbVersion = ociObj.DbVersion + } + if overwrite || adb.Spec.Details.DataStorageSizeInTBs == nil { + adb.Spec.Details.DataStorageSizeInTBs = ociObj.DataStorageSizeInTBs + } + if overwrite || adb.Spec.Details.CpuCoreCount == nil { + adb.Spec.Details.CpuCoreCount = ociObj.CpuCoreCount + } + if overwrite || adb.Spec.Details.ComputeModel == "" { + adb.Spec.Details.ComputeModel = ociObj.ComputeModel + } + if overwrite || adb.Spec.Details.OcpuCount == nil { + adb.Spec.Details.OcpuCount = ociObj.OcpuCount + } + if overwrite || adb.Spec.Details.ComputeCount == nil { + adb.Spec.Details.ComputeCount = ociObj.ComputeCount + } + if overwrite || adb.Spec.Details.IsAutoScalingEnabled == nil { + adb.Spec.Details.IsAutoScalingEnabled = ociObj.IsAutoScalingEnabled + } + if overwrite || adb.Spec.Details.IsDedicated == nil { + adb.Spec.Details.IsDedicated = ociObj.IsDedicated + } + if overwrite || adb.Spec.Details.IsFreeTier == nil { + adb.Spec.Details.IsFreeTier = ociObj.IsFreeTier + } + if overwrite || adb.Spec.Details.FreeformTags == nil { + // Special case: an emtpy map will be nil after unmarshalling while the OCI always returns an emty map. 
+ if len(ociObj.FreeformTags) != 0 { + adb.Spec.Details.FreeformTags = ociObj.FreeformTags + } else { + adb.Spec.Details.FreeformTags = nil + } + } + + if overwrite || adb.Spec.Details.IsAccessControlEnabled == nil { + adb.Spec.Details.IsAccessControlEnabled = ociObj.IsAccessControlEnabled + } + + if overwrite || adb.Spec.Details.WhitelistedIps == nil { + if len(ociObj.WhitelistedIps) != 0 { + adb.Spec.Details.WhitelistedIps = ociObj.WhitelistedIps + } else { + adb.Spec.Details.WhitelistedIps = nil + } + } + if overwrite || adb.Spec.Details.IsMtlsConnectionRequired == nil { + adb.Spec.Details.IsMtlsConnectionRequired = ociObj.IsMtlsConnectionRequired + } + if overwrite || adb.Spec.Details.SubnetId == nil { + adb.Spec.Details.SubnetId = ociObj.SubnetId + } + if overwrite || adb.Spec.Details.NsgIds == nil { + if len(ociObj.NsgIds) != 0 { + adb.Spec.Details.NsgIds = ociObj.NsgIds + } else { + adb.Spec.Details.NsgIds = nil + } + } + if overwrite || adb.Spec.Details.PrivateEndpointLabel == nil { + adb.Spec.Details.PrivateEndpointLabel = ociObj.PrivateEndpointLabel + } + + /*********************************** + * update the status subresource + ***********************************/ + adb.UpdateStatusFromOciAdb(ociObj) + + return !reflect.DeepEqual(oldADB.Spec, adb.Spec) +} + +// RemoveUnchangedDetails removes the unchanged fields in spec.details, and returns if the details has been changed. +func (adb *AutonomousDatabase) RemoveUnchangedDetails(prevSpec AutonomousDatabaseSpec) (bool, error) { + + changed, err := RemoveUnchangedFields(prevSpec.Details, &adb.Spec.Details) + if err != nil { + return changed, err + } + + return changed, nil +} + +// A helper function which is useful for debugging. The function prints out a structural JSON format. 
+func (adb *AutonomousDatabase) String() (string, error) { + out, err := json.MarshalIndent(adb, "", " ") + if err != nil { + return "", err + } + return string(out), nil +} diff --git a/apis/database/v4/autonomousdatabase_webhook.go b/apis/database/v4/autonomousdatabase_webhook.go new file mode 100644 index 00000000..f7eb60aa --- /dev/null +++ b/apis/database/v4/autonomousdatabase_webhook.go @@ -0,0 +1,170 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabaselog = logf.Log.WithName("autonomousdatabase-resource") + +func (r *AutonomousDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomousdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabases,versions=v4,name=vautonomousdatabasev4.kb.io,admissionReviewVersions=v1 +var _ webhook.Validator = &AutonomousDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +// ValidateCreate checks if the spec is valid for a provisioning or a binding operation +func (r *AutonomousDatabase) ValidateCreate() (admission.Warnings, error) { + var allErrs field.ErrorList + + autonomousdatabaselog.Info("validate create", "name", r.Name) + + namespaces := dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabase"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + var oldADB *AutonomousDatabase = old.(*AutonomousDatabase) + + autonomousdatabaselog.Info("validate update", "name", r.Name) + + // skip the verification of adding ADB OCID or binding + // if oldADB.Status.LifecycleState == "" { + // return nil, nil + // } + + // cannot update when the old state is in intermediate, except for the change to the hardLink or the 
terminate operatrion during valid lifecycleState + // var copySpec *AutonomousDatabaseSpec = r.Spec.DeepCopy() + // specChanged, err := RemoveUnchangedFields(oldADB.Spec, copySpec) + // if err != nil { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), err.Error())) + // } + + // hardLinkChanged := copySpec.HardLink != nil + + // isTerminateOp := CanBeTerminated(oldADB.Status.LifecycleState) && copySpec.Action == "Terminate" + + // if specChanged && IsAdbIntermediateState(oldADB.Status.LifecycleState) && !isTerminateOp && !hardLinkChanged { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), + // "cannot change the spec when the lifecycleState is in an intermdeiate state")) + // } + + // cannot modify autonomousDatabaseOCID + if r.Spec.Details.Id != nil && + oldADB.Spec.Details.Id != nil && + *r.Spec.Details.Id != *oldADB.Spec.Details.Id { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("autonomousDatabaseOCID"), + "autonomousDatabaseOCID cannot be modified")) + } + + allErrs = validateCommon(r, allErrs) + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabase"}, + r.Name, allErrs) +} + +func validateCommon(adb *AutonomousDatabase, allErrs field.ErrorList) field.ErrorList { + // password + if adb.Spec.Details.AdminPassword.K8sSecret.Name != nil && adb.Spec.Details.AdminPassword.OciSecret.Id != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("adminPassword"), + "cannot apply k8sSecret.name and ociSecret.ocid at the same time")) + } + + if adb.Spec.Wallet.Password.K8sSecret.Name != nil && adb.Spec.Wallet.Password.OciSecret.Id != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("wallet").Child("password"), + "cannot apply k8sSecret.name and ociSecret.ocid at the same time")) + } + + 
return allErrs +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabase) ValidateDelete() (admission.Warnings, error) { + autonomousdatabaselog.Info("validate delete", "name", r.Name) + return nil, nil +} + +// Returns true if AutonomousContainerDatabaseOCID has value. +// We don't use Details.IsDedicated because the parameter might be null when it's a provision operation. +func isDedicated(adb *AutonomousDatabase) bool { + return adb.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name != nil || + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id != nil +} diff --git a/apis/database/v4/autonomousdatabasebackup_types.go b/apis/database/v4/autonomousdatabasebackup_types.go new file mode 100644 index 00000000..925256c0 --- /dev/null +++ b/apis/database/v4/autonomousdatabasebackup_types.go @@ -0,0 +1,129 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and 
have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// AutonomousDatabaseBackupSpec defines the desired state of AutonomousDatabaseBackup +type AutonomousDatabaseBackupSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Target TargetSpec `json:"target,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + AutonomousDatabaseBackupOCID *string `json:"autonomousDatabaseBackupOCID,omitempty"` + IsLongTermBackup *bool `json:"isLongTermBackup,omitempty"` + RetentionPeriodInDays *int `json:"retentionPeriodInDays,omitempty"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` +} + +// AutonomousDatabaseBackupStatus defines the observed state of AutonomousDatabaseBackup +type AutonomousDatabaseBackupStatus struct { + LifecycleState database.AutonomousDatabaseBackupLifecycleStateEnum `json:"lifecycleState"` + Type database.AutonomousDatabaseBackupTypeEnum `json:"type"` + IsAutomatic bool `json:"isAutomatic"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeEnded string `json:"timeEnded,omitempty"` + AutonomousDatabaseOCID string `json:"autonomousDatabaseOCID"` + CompartmentOCID string `json:"compartmentOCID"` + DBName string `json:"dbName"` + DBDisplayName string `json:"dbDisplayName"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="adbbu";"adbbus" +//+kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string +//+kubebuilder:printcolumn:JSONPath=".status.dbDisplayName",name="DB DisplayName",type=string +//+kubebuilder:printcolumn:JSONPath=".status.type",name="Type",type=string +//+kubebuilder:printcolumn:JSONPath=".status.timeStarted",name="Started",type=string +//+kubebuilder:printcolumn:JSONPath=".status.timeEnded",name="Ended",type=string +// +kubebuilder:storageversion + +// AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups API +type AutonomousDatabaseBackup struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousDatabaseBackupSpec `json:"spec,omitempty"` + Status AutonomousDatabaseBackupStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousDatabaseBackupList contains a list of AutonomousDatabaseBackup +type AutonomousDatabaseBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabaseBackup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabaseBackup{}, &AutonomousDatabaseBackupList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousDatabaseBackup) Hub() {} + +func (b *AutonomousDatabaseBackup) UpdateStatusFromOCIBackup(ociBackup database.AutonomousDatabaseBackup, ociADB database.AutonomousDatabase) { + b.Status.AutonomousDatabaseOCID = *ociBackup.AutonomousDatabaseId + b.Status.CompartmentOCID = *ociBackup.CompartmentId + b.Status.Type = ociBackup.Type + b.Status.IsAutomatic = *ociBackup.IsAutomatic + + b.Status.LifecycleState = ociBackup.LifecycleState + + b.Status.TimeStarted = FormatSDKTime(ociBackup.TimeStarted) + b.Status.TimeEnded = FormatSDKTime(ociBackup.TimeEnded) + + b.Status.DBDisplayName = *ociADB.DisplayName + b.Status.DBName = *ociADB.DbName +} + +// GetTimeEnded returns the status.timeEnded in SDKTime format +func (b *AutonomousDatabaseBackup) GetTimeEnded() (*common.SDKTime, error) { + return ParseDisplayTime(b.Status.TimeEnded) +} diff --git a/apis/database/v4/autonomousdatabasebackup_webhook.go b/apis/database/v4/autonomousdatabasebackup_webhook.go new file mode 100644 index 00000000..7858adce --- /dev/null +++ b/apis/database/v4/autonomousdatabasebackup_webhook.go @@ -0,0 +1,158 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabasebackuplog = logf.Log.WithName("autonomousdatabasebackup-resource") + +func (r *AutonomousDatabaseBackup) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-autonomousdatabasebackup,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,verbs=create;update,versions=v4,name=mautonomousdatabasebackupv4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &AutonomousDatabaseBackup{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) Default() { + autonomousdatabasebackuplog.Info("default", "name", r.Name) +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomousdatabasebackup,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,versions=v4,name=vautonomousdatabasebackupv4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousDatabaseBackup{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateCreate() (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate create", "name", r.Name) + + var allErrs field.ErrorList + + namespaces := 
 dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces map contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + } + + if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.OCID == nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty")) + } + + if r.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.OciAdb.OCID != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB or ociADB, but not both")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseBackup"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate update", "name", r.Name) + + var allErrs field.ErrorList + oldBackup := old.(*AutonomousDatabaseBackup) + + if oldBackup.Spec.AutonomousDatabaseBackupOCID != nil && r.Spec.AutonomousDatabaseBackupOCID != nil && + *oldBackup.Spec.AutonomousDatabaseBackupOCID != *r.Spec.AutonomousDatabaseBackupOCID { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("autonomousDatabaseBackupOCID"), + "cannot assign a new autonomousDatabaseBackupOCID to this backup")) + } + + if oldBackup.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.K8sAdb.Name != nil && + *oldBackup.Spec.Target.K8sAdb.Name != *r.Spec.Target.K8sAdb.Name { + allErrs = 
append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target").Child("k8sADB").Child("name"), "cannot assign a new name to the target")) + } + + if oldBackup.Spec.Target.OciAdb.OCID != nil && r.Spec.Target.OciAdb.OCID != nil && + *oldBackup.Spec.Target.OciAdb.OCID != *r.Spec.Target.OciAdb.OCID { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target").Child("ociADB").Child("ocid"), "cannot assign a new ocid to the target")) + } + + if oldBackup.Spec.DisplayName != nil && r.Spec.DisplayName != nil && + *oldBackup.Spec.DisplayName != *r.Spec.DisplayName { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("displayName"), "cannot assign a new displayName to this backup")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseBackup"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateDelete() (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/autonomousdatabaserestore_types.go b/apis/database/v4/autonomousdatabaserestore_types.go new file mode 100644 index 00000000..3337c983 --- /dev/null +++ b/apis/database/v4/autonomousdatabaserestore_types.go @@ -0,0 +1,142 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type K8sADBBackupSpec struct { + Name *string `json:"name,omitempty"` +} + +type PITSpec struct { + // The timestamp must follow this format: YYYY-MM-DD HH:MM:SS GMT + Timestamp *string `json:"timestamp,omitempty"` +} + +type SourceSpec struct { + K8sAdbBackup K8sADBBackupSpec `json:"k8sADBBackup,omitempty"` + PointInTime PITSpec `json:"pointInTime,omitempty"` +} + +// AutonomousDatabaseRestoreSpec defines the desired state of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Target TargetSpec `json:"target"` + Source SourceSpec `json:"source"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` +} + +// AutonomousDatabaseRestoreStatus defines the observed state of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + DisplayName string `json:"displayName"` + TimeAccepted string `json:"timeAccepted,omitempty"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeEnded string `json:"timeEnded,omitempty"` + DbName string `json:"dbName"` + WorkRequestOCID string `json:"workRequestOCID"` + Status workrequests.WorkRequestStatusEnum `json:"status"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="adbr";"adbrs" +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string 
+// +kubebuilder:printcolumn:JSONPath=".status.displayName",name="DbDisplayName",type=string +// +kubebuilder:printcolumn:JSONPath=".status.dbName",name="DbName",type=string +// +kubebuilder:storageversion + +// AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores API +type AutonomousDatabaseRestore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousDatabaseRestoreSpec `json:"spec,omitempty"` + Status AutonomousDatabaseRestoreStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousDatabaseRestoreList contains a list of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabaseRestore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabaseRestore{}, &AutonomousDatabaseRestoreList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousDatabaseRestore) Hub() {} + +// GetPIT returns the spec.pointInTime.timeStamp in SDKTime format +func (r *AutonomousDatabaseRestore) GetPIT() (*common.SDKTime, error) { + if r.Spec.Source.PointInTime.Timestamp == nil { + return nil, errors.New("the timestamp is empty") + } + return ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) +} + +func (r *AutonomousDatabaseRestore) UpdateStatus( + adb database.AutonomousDatabase, + workResp workrequests.GetWorkRequestResponse) { + + r.Status.DisplayName = *adb.DisplayName + r.Status.DbName = *adb.DbName + + r.Status.WorkRequestOCID = *workResp.Id + r.Status.Status = workResp.Status + r.Status.TimeAccepted = FormatSDKTime(workResp.TimeAccepted) + r.Status.TimeStarted = FormatSDKTime(workResp.TimeStarted) + r.Status.TimeEnded = FormatSDKTime(workResp.TimeFinished) +} diff --git a/apis/database/v4/autonomousdatabaserestore_webhook.go 
b/apis/database/v4/autonomousdatabaserestore_webhook.go new file mode 100644 index 00000000..6e3b4656 --- /dev/null +++ b/apis/database/v4/autonomousdatabaserestore_webhook.go @@ -0,0 +1,146 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabaserestorelog = logf.Log.WithName("autonomousdatabaserestore-resource") + +func (r *AutonomousDatabaseRestore) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomousdatabaserestore,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabaserestores,versions=v4,name=vautonomousdatabaserestorev4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousDatabaseRestore{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateCreate() (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate create", "name", r.Name) + + var allErrs field.ErrorList + + namespaces := dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces map contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + } + + // Validate the target ADB + if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.OCID == nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty")) + } + + if r.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.OciAdb.OCID != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB.name or ociADB.ocid, but not both")) + } + + // Validate the restore source + if r.Spec.Source.K8sAdbBackup.Name == nil && + r.Spec.Source.PointInTime.Timestamp == nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("source"), "restore source is empty")) + } + + if r.Spec.Source.K8sAdbBackup.Name != nil && + r.Spec.Source.PointInTime.Timestamp != nil { + allErrs = append(allErrs, + 
field.Forbidden(field.NewPath("spec").Child("source"), "cannot apply backupName and the PITR parameters at the same time")) + } + + // Verify the timestamp format if it's PITR + if r.Spec.Source.PointInTime.Timestamp != nil { + _, err := ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) + if err != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("source").Child("pointInTime").Child("timestamp"), "invalid timestamp format")) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseRestore"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate update", "name", r.Name) + + var allErrs field.ErrorList + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseRestore"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateDelete() (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/cdb_types.go b/apis/database/v4/cdb_types.go new file mode 100644 index 00000000..ce3f6f28 --- /dev/null +++ b/apis/database/v4/cdb_types.go @@ -0,0 +1,191 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CDBSpec defines the desired state of CDB +type CDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // Name of the CDB Service + ServiceName string `json:"serviceName,omitempty"` + + // Password for the CDB System Administrator + SysAdminPwd CDBSysAdminPassword `json:"sysAdminPwd,omitempty"` + // User in the root container with sysdba privileges to manage PDB lifecycle + CDBAdminUser CDBAdminUser `json:"cdbAdminUser,omitempty"` + // Password for the CDB Administrator to manage PDB lifecycle + CDBAdminPwd CDBAdminPassword `json:"cdbAdminPwd,omitempty"` + + CDBTlsKey CDBTLSKEY `json:"cdbTlsKey,omitempty"` + CDBTlsCrt CDBTLSCRT `json:"cdbTlsCrt,omitempty"` + + // Password for user ORDS_PUBLIC_USER + ORDSPwd ORDSPassword `json:"ordsPwd,omitempty"` + // ORDS server port. For now, keep it as 8888. TO BE USED IN FUTURE RELEASE. + ORDSPort int `json:"ordsPort,omitempty"` + // ORDS Image Name + ORDSImage string `json:"ordsImage,omitempty"` + // The name of the image pull secret in case of a private docker repository. 
// NOTE(review): reconstructed from a whitespace-mangled unified diff. The
// enclosing CDBSpec struct opens before this chunk; only its tail is visible here.

	// Name of the Kubernetes secret used to pull the ORDS image.
	ORDSImagePullSecret string `json:"ordsImagePullSecret,omitempty"`
	// ORDS Image Pull Policy
	// +kubebuilder:validation:Enum=Always;Never
	// NOTE(review): IfNotPresent is excluded by this enum — confirm that is intentional.
	ORDSImagePullPolicy string `json:"ordsImagePullPolicy,omitempty"`
	// Number of ORDS Containers to create
	Replicas int `json:"replicas,omitempty"`
	// Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints
	WebServerUser WebServerUser `json:"webServerUser,omitempty"`
	// Password for the Web Server User
	WebServerPwd WebServerPassword `json:"webServerPwd,omitempty"`
	// Name of the DB server
	DBServer string `json:"dbServer,omitempty"`
	// DB server port
	DBPort int `json:"dbPort,omitempty"`
	// Node Selector for running the Pod
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// Whether deleting a PDB cascades — NOTE(review): exact semantics are not
	// visible in this file; confirm against the PDB controller.
	DeletePDBCascade bool `json:"deletePdbCascade,omitempty"`
	// Complete TNS connect string. The validating webhook (cdb_webhook.go) rejects
	// specs that set this together with dbServer/dbPort/serviceName.
	DBTnsurl string `json:"dbTnsurl,omitempty"`
	// Secret holding the ORDS public key.
	CDBPubKey CDBPUBKEY `json:"cdbOrdsPubKey,omitempty"`
	// Secret holding the ORDS private key.
	CDBPriKey CDBPRIVKEY `json:"cdbOrdsPrvKey,omitempty"`
}

// CDBSecret defines the secretName/key pair that locates one value inside a
// Kubernetes secret.
type CDBSecret struct {
	SecretName string `json:"secretName"`
	Key        string `json:"key"`
}

// CDBSysAdminPassword defines the secret containing SysAdmin Password mapped to key 'sysAdminPwd' for CDB
type CDBSysAdminPassword struct {
	Secret CDBSecret `json:"secret"`
}

// CDBAdminUser defines the secret containing CDB Administrator User mapped to key 'cdbAdminUser' to manage PDB lifecycle
type CDBAdminUser struct {
	Secret CDBSecret `json:"secret"`
}

// CDBAdminPassword defines the secret containing CDB Administrator Password mapped to key 'cdbAdminPwd' to manage PDB lifecycle
type CDBAdminPassword struct {
	Secret CDBSecret `json:"secret"`
}

// ORDSPassword defines the secret containing ORDS_PUBLIC_USER Password mapped to key 'ordsPwd'
type ORDSPassword struct {
	Secret CDBSecret `json:"secret"`
}

// WebServerUser defines the secret containing Web Server User mapped to key 'webServerUser' to manage PDB lifecycle
type WebServerUser struct {
	Secret CDBSecret `json:"secret"`
}

// WebServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle
type WebServerPassword struct {
	Secret CDBSecret `json:"secret"`
}

// CDBTLSKEY references the secret holding the TLS private key.
type CDBTLSKEY struct {
	Secret CDBSecret `json:"secret"`
}

// CDBTLSCRT references the secret holding the TLS certificate.
type CDBTLSCRT struct {
	Secret CDBSecret `json:"secret"`
}

// CDBPUBKEY references the secret holding the ORDS public key.
type CDBPUBKEY struct {
	Secret CDBSecret `json:"secret"`
}

// CDBPRIVKEY references the secret holding the ORDS private key.
type CDBPRIVKEY struct {
	Secret CDBSecret `json:"secret"`
}

// CDBStatus defines the observed state of CDB
type CDBStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Phase of the CDB Resource
	Phase string `json:"phase"`
	// CDB Resource Status
	Status bool `json:"status"`
	// Message
	Msg string `json:"msg,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB"
// +kubebuilder:printcolumn:JSONPath=".spec.dbServer",name="DB Server",type="string",description=" Name of the DB Server"
// +kubebuilder:printcolumn:JSONPath=".spec.dbPort",name="DB Port",type="integer",description="DB server port"
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="Replicas",type="integer",description="Replicas"
// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the CDB Resource"
// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any"
// +kubebuilder:printcolumn:JSONPath=".spec.dbTnsurl",name="TNS STRING",type="string",description=" string of the tnsalias"
// +kubebuilder:resource:path=cdbs,scope=Namespaced
// +kubebuilder:storageversion

// CDB is the Schema for the cdbs API
type CDB struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   CDBSpec   `json:"spec,omitempty"`
	Status CDBStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// CDBList contains a list of CDB
type CDBList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []CDB `json:"items"`
}

// init registers the CDB kinds with the group's scheme builder.
func init() {
	SchemeBuilder.Register(&CDB{}, &CDBList{})
}

// ---- apis/database/v4/cdb_webhook.go (new file in this change) ----

/*
** Copyright (c) 2022 Oracle and/or its affiliates.
**
** The Universal Permissive License (UPL), Version 1.0
**
** Subject to the condition set forth below, permission is hereby granted to any
** person obtaining a copy of this software, associated documentation and/or data
** (collectively the "Software"), free of charge and under any and all copyright
** rights in the Software, and any and all patent rights owned or freely
** licensable by each licensor hereunder covering either (i) the unmodified
** Software as contributed to or provided by such licensor, or (ii) the Larger
** Works (as defined below), to deal in both
**
** (a) the Software, and
** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
** one is included with the Software (each a "Larger Work" to which the Software
** is contributed by such licensors),
**
** without restriction, including without limitation the rights to copy, create
** derivative works of, display, perform, and distribute the Software and make,
** use, sell, offer for sale, import, export, have made, and have sold the
** Software and the Larger Work(s), and to sublicense the foregoing rights on
** either these or other terms.
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var cdblog = logf.Log.WithName("cdb-webhook") + +func (r *CDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-cdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=mcdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &CDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *CDB) Default() { + cdblog.Info("Setting default values in CDB spec for : " + r.Name) + + if r.Spec.ORDSPort == 0 { + r.Spec.ORDSPort = 8888 + } + + if r.Spec.Replicas == 0 { + r.Spec.Replicas = 1 + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-cdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=vcdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &CDB{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateCreate() (admission.Warnings, error) { + cdblog.Info("ValidateCreate", "name", r.Name) + + var allErrs field.ErrorList + + if r.Spec.ServiceName == "" && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("serviceName"), "Please specify CDB Service name")) + } + + if reflect.ValueOf(r.Spec.CDBTlsKey).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbTlsKey"), "Please specify CDB Tls key(secret)")) + } + + if reflect.ValueOf(r.Spec.CDBTlsCrt).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbTlsCrt"), "Please specify CDB Tls Certificate(secret)")) + } + + if reflect.ValueOf(r.Spec.CDBPriKey).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("CDBPriKey"), "Please specify CDB CDBPriKey(secret)")) + } + + /*if r.Spec.SCANName == "" { + 
allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("scanName"), "Please specify SCAN Name for CDB")) + }*/ + + if (r.Spec.DBServer == "" && r.Spec.DBTnsurl == "") || (r.Spec.DBServer != "" && r.Spec.DBTnsurl != "") { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbServer"), "Please specify Database Server Name/IP Address or tnsalias string")) + } + + if r.Spec.DBTnsurl != "" && (r.Spec.DBServer != "" || r.Spec.DBPort != 0 || r.Spec.ServiceName != "") { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbServer"), "DBtnsurl is orthogonal to (DBServer,DBport,Services)")) + } + + if r.Spec.DBPort == 0 && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify DB Server Port")) + } + if r.Spec.DBPort < 0 && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port")) + } + if r.Spec.ORDSPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid ORDS Port")) + } + if r.Spec.Replicas < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas")) + } + if r.Spec.ORDSImage == "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsImage"), "Please specify name of ORDS Image to be used")) + } + if reflect.ValueOf(r.Spec.CDBAdminUser).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbAdminUser"), "Please specify user in the root container with sysdba priviledges to manage PDB lifecycle")) + } + if reflect.ValueOf(r.Spec.CDBAdminPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbAdminPwd"), "Please specify password for the CDB Administrator to manage PDB lifecycle")) + } + if 
reflect.ValueOf(r.Spec.ORDSPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPwd"), "Please specify password for user ORDS_PUBLIC_USER")) + } + if reflect.ValueOf(r.Spec.WebServerUser).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("webServerUser"), "Please specify the Web Server User having SQL Administrator role")) + } + if reflect.ValueOf(r.Spec.WebServerPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify password for the Web Server User having SQL Administrator role")) + } + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "CDB"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + cdblog.Info("validate update", "name", r.Name) + + isCDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil + if isCDBMarkedToBeDeleted { + return nil, nil + } + + var allErrs field.ErrorList + + // Check for updation errors + oldCDB, ok := old.(*CDB) + if !ok { + return nil, nil + } + + if r.Spec.DBPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port")) + } + if r.Spec.ORDSPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid ORDS Port")) + } + if r.Spec.Replicas < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas")) + } + if !strings.EqualFold(oldCDB.Spec.ServiceName, r.Spec.ServiceName) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("replicas"), "cannot be changed")) + } + + if len(allErrs) == 0 { + return nil, nil + } + + return nil, 
apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "CDB"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateDelete() (admission.Warnings, error) { + cdblog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/dataguardbroker_conversion.go b/apis/database/v4/dataguardbroker_conversion.go new file mode 100644 index 00000000..c63210e0 --- /dev/null +++ b/apis/database/v4/dataguardbroker_conversion.go @@ -0,0 +1,4 @@ +package v4 + +// Hub defines v1 as the hub version +func (*DataguardBroker) Hub() {} diff --git a/apis/database/v4/dataguardbroker_types.go b/apis/database/v4/dataguardbroker_types.go new file mode 100644 index 00000000..cec11ca4 --- /dev/null +++ b/apis/database/v4/dataguardbroker_types.go @@ -0,0 +1,163 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
 */

package v4

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// DataguardBrokerSpec defines the desired state of DataguardBroker
type DataguardBrokerSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Reference to the SingleInstanceDatabase acting as primary.
	PrimaryDatabaseRef string `json:"primaryDatabaseRef"`
	// References to the standby databases participating in the configuration.
	StandbyDatabaseRefs []string `json:"standbyDatabaseRefs"`
	// Requests a switchover to the named database when set.
	// NOTE(review): switchover semantics inferred from the field name — confirm in the controller.
	SetAsPrimaryDatabase string            `json:"setAsPrimaryDatabase,omitempty"`
	LoadBalancer         bool              `json:"loadBalancer,omitempty"`
	ServiceAnnotations   map[string]string `json:"serviceAnnotations,omitempty"`
	// +kubebuilder:validation:Enum=MaxPerformance;MaxAvailability
	ProtectionMode string            `json:"protectionMode"`
	NodeSelector   map[string]string `json:"nodeSelector,omitempty"`

	FastStartFailover bool `json:"fastStartFailover,omitempty"`
}

// DataguardBrokerStatus defines the observed state of DataguardBroker
type DataguardBrokerStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	PrimaryDatabaseRef    string `json:"primaryDatabaseRef,omitempty"`
	ProtectionMode        string `json:"protectionMode,omitempty"`
	PrimaryDatabase       string `json:"primaryDatabase,omitempty"`
	StandbyDatabases      string `json:"standbyDatabases,omitempty"`
	ExternalConnectString string `json:"externalConnectString,omitempty"`
	ClusterConnectString  string `json:"clusterConnectString,omitempty"`
	Status                string `json:"status,omitempty"`

	FastStartFailover string `json:"fastStartFailover,omitempty"`
	// Map of databases known to the Data Guard configuration; both the methods
	// below treat its values as the authoritative database list once populated.
	DatabasesInDataguardConfig map[string]string `json:"databasesInDataguardConfig,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".status.primaryDatabase",name="Primary",type="string"
// +kubebuilder:printcolumn:JSONPath=".status.standbyDatabases",name="Standbys",type="string"
// +kubebuilder:printcolumn:JSONPath=".spec.protectionMode",name="Protection Mode",type="string"
// +kubebuilder:printcolumn:JSONPath=".status.clusterConnectString",name="Cluster Connect Str",type="string",priority=1
// +kubebuilder:printcolumn:JSONPath=".status.externalConnectString",name="Connect Str",type="string"
// +kubebuilder:printcolumn:JSONPath=".spec.primaryDatabaseRef",name="Primary Database",type="string", priority=1
// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string"
// +kubebuilder:printcolumn:JSONPath=".status.fastStartFailover",name="FSFO", type="string"

// DataguardBroker is the Schema for the dataguardbrokers API
// +kubebuilder:storageversion
type DataguardBroker struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DataguardBrokerSpec   `json:"spec,omitempty"`
	Status DataguardBrokerStatus `json:"status,omitempty"`
}

// //////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the current primary database in the dataguard configuration from the resource status/spec
// //////////////////////////////////////////////////////////////////////////////////////////////////
func (broker *DataguardBroker) GetCurrentPrimaryDatabase() string {
	if broker.Status.PrimaryDatabase != "" {
		// NOTE(review): if the map has no entry for Status.PrimaryDatabase this
		// returns "" (map zero value) rather than the spec fallback — confirm
		// callers tolerate that.
		return broker.Status.DatabasesInDataguardConfig[broker.Status.PrimaryDatabase]
	}
	return broker.Spec.PrimaryDatabaseRef
}

// //////////////////////////////////////////////////////////////////////////////////////////////////
// Returns databases in Dataguard configuration from the resource status/spec
// //////////////////////////////////////////////////////////////////////////////////////////////////
func (broker *DataguardBroker) GetDatabasesInDataGuardConfiguration() []string {
	var databases []string
	// Prefer the observed configuration (status) once it has been populated.
	// NOTE: map iteration order is randomized in Go, so the returned order is
	// not deterministic in this branch.
	if len(broker.Status.DatabasesInDataguardConfig) > 0 {
		for _, value := range broker.Status.DatabasesInDataguardConfig {
			if value != "" {
				databases = append(databases, value)
			}
		}

		return databases
	}

	// Fall back to the declared spec: primary first, then the standbys.
	databases = append(databases, broker.Spec.PrimaryDatabaseRef)
	databases = append(databases, broker.Spec.StandbyDatabaseRefs...)
	return databases
}

// //////////////////////////////////////////////////////////////////////////////////////////////////
// Returns standby databases in the dataguard configuration from the resource status/spec
// //////////////////////////////////////////////////////////////////////////////////////////////////
func (broker *DataguardBroker) GetStandbyDatabasesInDgConfig() []string {
	var databases []string
	// Same pattern as above, but the current primary is filtered out.
	if len(broker.Status.DatabasesInDataguardConfig) > 0 {
		for _, value := range broker.Status.DatabasesInDataguardConfig {
			if value != "" && value != broker.Status.PrimaryDatabase {
				databases = append(databases, value)
			}
		}

		return databases
	}

	databases = append(databases, broker.Spec.StandbyDatabaseRefs...)
	return databases
}

//+kubebuilder:object:root=true

// DataguardBrokerList contains a list of DataguardBroker
type DataguardBrokerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DataguardBroker `json:"items"`
}

// init registers the DataguardBroker kinds with the group's scheme builder.
func init() {
	SchemeBuilder.Register(&DataguardBroker{}, &DataguardBrokerList{})
}

// ---- apis/database/v4/dataguardbroker_webhook.go (new file in this change) ----

/*
** Copyright (c) 2022 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
 */

package v4

import (
	ctrl "sigs.k8s.io/controller-runtime"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
)

// log is for logging in this package.
var dataguardbrokerlog = logf.Log.WithName("dataguardbroker-resource")

// SetupWebhookWithManager registers the DataguardBroker webhook with the
// controller manager. No Defaulter/Validator is implemented here yet.
func (r *DataguardBroker) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(r).
		Complete()
}

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!

// ---- apis/database/v4/dbcssystem_conversion.go (new file in this change) ----

package v4

// Hub marks the v4 DbcsSystem as the conversion hub version.
func (*DbcsSystem) Hub() {}

// ---- apis/database/v4/dbcssystem_kms_types.go (new file in this change) ----

/*
** Copyright (c) 2022-2024 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
 */
package v4

import "encoding/json"

// KMSConfig is the user-supplied OCI Vault/KMS configuration for TDE key
// management on a DBCS system.
type KMSConfig struct {
	VaultName      string `json:"vaultName,omitempty"`
	CompartmentId  string `json:"compartmentId,omitempty"`
	KeyName        string `json:"keyName,omitempty"`
	EncryptionAlgo string `json:"encryptionAlgo,omitempty"`
	VaultType      string `json:"vaultType,omitempty"`
}

// KMSDetailsStatus is the observed KMS state; it extends KMSConfig's fields
// with the resolved OCIDs and management endpoint.
type KMSDetailsStatus struct {
	VaultId            string `json:"vaultId,omitempty"`
	ManagementEndpoint string `json:"managementEndpoint,omitempty"`
	KeyId              string `json:"keyId,omitempty"`
	VaultName          string `json:"vaultName,omitempty"`
	CompartmentId      string `json:"compartmentId,omitempty"`
	KeyName            string `json:"keyName,omitempty"`
	EncryptionAlgo     string `json:"encryptionAlgo,omitempty"`
	VaultType          string `json:"vaultType,omitempty"`
}

// Annotation keys under which the last successfully reconciled KMS config and
// status are persisted as JSON on the DbcsSystem object.
const (
	lastSuccessfulKMSConfig = "lastSuccessfulKMSConfig"
	lastSuccessfulKMSStatus = "lastSuccessfulKMSStatus"
)

// GetLastSuccessfulKMSConfig returns the KMS config from the last successful reconciliation.
// Returns nil, nil if there is no lastSuccessfulKMSConfig.
func (dbcs *DbcsSystem) GetLastSuccessfulKMSConfig() (*KMSConfig, error) {
	val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSConfig]
	if !ok {
		return nil, nil
	}

	configBytes := []byte(val)
	kmsConfig := KMSConfig{}

	err := json.Unmarshal(configBytes, &kmsConfig)
	if err != nil {
		return nil, err
	}

	return &kmsConfig, nil
}

// GetLastSuccessfulKMSStatus returns the KMS status from the last successful reconciliation.
// Returns nil, nil if there is no lastSuccessfulKMSStatus.
func (dbcs *DbcsSystem) GetLastSuccessfulKMSStatus() (*KMSDetailsStatus, error) {
	val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSStatus]
	if !ok {
		return nil, nil
	}

	statusBytes := []byte(val)
	kmsStatus := KMSDetailsStatus{}

	err := json.Unmarshal(statusBytes, &kmsStatus)
	if err != nil {
		return nil, err
	}

	return &kmsStatus, nil
}

// SetLastSuccessfulKMSConfig saves the given KMSConfig to the annotations.
func (dbcs *DbcsSystem) SetLastSuccessfulKMSConfig(kmsConfig *KMSConfig) error {
	configBytes, err := json.Marshal(kmsConfig)
	if err != nil {
		return err
	}

	annotations := dbcs.GetAnnotations()
	if annotations == nil {
		annotations = make(map[string]string)
	}
	annotations[lastSuccessfulKMSConfig] = string(configBytes)
	dbcs.SetAnnotations(annotations)
	return nil
}

// SetLastSuccessfulKMSStatus saves the given KMSDetailsStatus to the annotations
// and mirrors it into Status.KMSDetailsStatus.
func (dbcs *DbcsSystem) SetLastSuccessfulKMSStatus(kmsStatus *KMSDetailsStatus) error {
	statusBytes, err := json.Marshal(kmsStatus)
	if err != nil {
		return err
	}

	annotations := dbcs.GetAnnotations()
	if annotations == nil {
		annotations = make(map[string]string)
	}
	annotations[lastSuccessfulKMSStatus] = string(statusBytes)
	dbcs.SetAnnotations(annotations)
	// Update KMSDetailsStatus in DbcsSystemStatus.
	// NOTE(review): only the config-like fields are copied here; VaultId,
	// ManagementEndpoint and KeyId are NOT propagated into Status — confirm
	// this is intentional (they remain available via the annotation).
	dbcs.Status.KMSDetailsStatus = KMSDetailsStatus{
		VaultName:      kmsStatus.VaultName,
		CompartmentId:  kmsStatus.CompartmentId,
		KeyName:        kmsStatus.KeyName,
		EncryptionAlgo: kmsStatus.EncryptionAlgo,
		VaultType:      kmsStatus.VaultType,
	}
	return nil
}

// ---- apis/database/v4/dbcssystem_pdbconfig_types.go (new file in this change) ----

/*
** Copyright (c) 2022-2024 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
 */
package v4

// PDBConfig defines details of PDB struct for DBCS systems.
// NOTE(review): the `mandatory:"…"` tags follow the OCI Go SDK convention and
// are not consumed by encoding/json.
type PDBConfig struct {
	// The name for the pluggable database (PDB). The name is unique in the context of a Database. The name must begin with an alphabetic character and can contain a maximum of thirty alphanumeric characters. Special characters are not permitted. The pluggable database name should not be same as the container database name.
	PdbName *string `mandatory:"true" json:"pdbName"`

	// The OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the CDB
	// ContainerDatabaseId *string `mandatory:"false" json:"containerDatabaseId"`

	// // A strong password for PDB Admin. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, \#, or -.
	PdbAdminPassword *string `mandatory:"false" json:"pdbAdminPassword"`

	// // The existing TDE wallet password of the CDB.
	TdeWalletPassword *string `mandatory:"false" json:"tdeWalletPassword"`

	// // The locked mode of the pluggable database admin account. If false, the user needs to provide the PDB Admin Password to connect to it.
	// // If true, the pluggable database will be locked and user cannot login to it.
	ShouldPdbAdminAccountBeLocked *bool `mandatory:"false" json:"shouldPdbAdminAccountBeLocked"`

	// // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
	// // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
	// // Example: `{"Department": "Finance"}`
	FreeformTags map[string]string `mandatory:"false" json:"freeformTags"`

	// // Defined tags for this resource. Each key is predefined and scoped to a namespace.
	// // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
	// DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"`

	// To specify whether to delete the PDB
	IsDelete *bool `mandatory:"false" json:"isDelete,omitempty"`

	// The OCID of the PDB for deletion purposes.
	PluggableDatabaseId *string `mandatory:"false" json:"pluggableDatabaseId,omitempty"`
}

// PDBConfigStatus is the observed counterpart of PDBConfig, including the
// PDB's lifecycle state.
type PDBConfigStatus struct {
	PdbName                       *string           `mandatory:"false" json:"pdbName,omitempty"`
	ShouldPdbAdminAccountBeLocked *bool             `mandatory:"false" json:"shouldPdbAdminAccountBeLocked,omitempty"`
	FreeformTags                  map[string]string `mandatory:"false" json:"freeformTags,omitempty"`
	PluggableDatabaseId           *string           `mandatory:"false" json:"pluggableDatabaseId,omitempty"`
	PdbLifecycleState             LifecycleState    `json:"pdbState,omitempty"`
}

// PDBDetailsStatus aggregates the status of every PDB on the system.
type PDBDetailsStatus struct {
	PDBConfigStatus []PDBConfigStatus `json:"pdbConfigStatus,omitempty"`
}

// ---- apis/database/v4/dbcssystem_types.go (new file in this change) ----

/*
** Copyright (c) 2022-2024 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v4 + +import ( + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/go-logr/logr" + dbcsv1 "github.com/oracle/oracle-database-operator/commons/annotations" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// DbcsSystemSpec defines the desired state of DbcsSystem +type DbcsSystemSpec struct { + DbSystem DbSystemDetails `json:"dbSystem,omitempty"` + Id *string `json:"id,omitempty"` + OCIConfigMap *string `json:"ociConfigMap"` + OCISecret *string `json:"ociSecret,omitempty"` + DbClone *DbCloneConfig `json:"dbClone,omitempty"` + HardLink bool `json:"hardLink,omitempty"` + PdbConfigs []PDBConfig `json:"pdbConfigs,omitempty"` + SetupDBCloning bool `json:"setupDBCloning,omitempty"` + DbBackupId *string `json:"dbBackupId,omitempty"` + DatabaseId *string `json:"databaseId,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` +} + +// DbSystemDetails Spec + +type DbSystemDetails struct { + CompartmentId string `json:"compartmentId"` + AvailabilityDomain string `json:"availabilityDomain"` + SubnetId string `json:"subnetId"` + Shape string `json:"shape"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + HostName string `json:"hostName"` + CpuCoreCount int `json:"cpuCoreCount,omitempty"` + FaultDomains []string `json:"faultDomains,omitempty"` + DisplayName string `json:"displayName,omitempty"` + BackupSubnetId string `json:"backupSubnetId,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + NodeCount *int `json:"nodeCount,omitempty"` + PrivateIp string `json:"privateIp,omitempty"` + Domain string `json:"domain,omitempty"` + InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"` + ClusterName string `json:"clusterName,omitempty"` + DbAdminPasswordSecret string `json:"dbAdminPasswordSecret"` + DbName string 
`json:"dbName,omitempty"` + PdbName string `json:"pdbName,omitempty"` + DbDomain string `json:"dbDomain,omitempty"` + DbUniqueName string `json:"dbUniqueName,omitempty"` + StorageManagement string `json:"storageManagement,omitempty"` + DbVersion string `json:"dbVersion,omitempty"` + DbEdition string `json:"dbEdition,omitempty"` + DiskRedundancy string `json:"diskRedundancy,omitempty"` + DbWorkload string `json:"dbWorkload,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + DbBackupConfig Backupconfig `json:"dbBackupConfig,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` +} + +// DB Backup Config Network Struct +type Backupconfig struct { + AutoBackupEnabled *bool `json:"autoBackupEnabled,omitempty"` + RecoveryWindowsInDays *int `json:"recoveryWindowsInDays,omitempty"` + AutoBackupWindow *string `json:"autoBackupWindow,omitempty"` + BackupDestinationDetails *string `json:"backupDestinationDetails,omitempty"` +} + +// DbcsSystemStatus defines the observed state of DbcsSystem +type DbcsSystemStatus struct { + Id *string `json:"id,omitempty"` + DisplayName string `json:"displayName,omitempty"` + AvailabilityDomain string `json:"availabilityDomain,omitempty"` + SubnetId string `json:"subnetId,omitempty"` + StorageManagement string `json:"storageManagement,omitempty"` + NodeCount int `json:"nodeCount,omitempty"` + CpuCoreCount int `json:"cpuCoreCount,omitempty"` + + DbEdition string `json:"dbEdition,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + DataStoragePercentage *int `json:"dataStoragePercentage,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + DataStorageSizeInGBs *int `json:"dataStorageSizeInGBs,omitempty"` + RecoStorageSizeInGB *int `json:"recoStorageSizeInGB,omitempty"` + + Shape *string `json:"shape,omitempty"` + State LifecycleState `json:"state"` + DbInfo []DbStatus 
`json:"dbInfo,omitempty"` + Network VmNetworkDetails `json:"network,omitempty"` + WorkRequests []DbWorkrequests `json:"workRequests,omitempty"` + KMSDetailsStatus KMSDetailsStatus `json:"kmsDetailsStatus,omitempty"` + DbCloneStatus DbCloneStatus `json:"dbCloneStatus,omitempty"` + PdbDetailsStatus []PDBDetailsStatus `json:"pdbDetailsStatus,omitempty"` +} + +// DbcsSystemStatus defines the observed state of DbcsSystem +type DbStatus struct { + Id *string `json:"id,omitempty"` + DbName string `json:"dbName,omitempty"` + DbUniqueName string `json:"dbUniqueName,omitempty"` + DbWorkload string `json:"dbWorkload,omitempty"` + DbHomeId string `json:"dbHomeId,omitempty"` +} + +type DbWorkrequests struct { + OperationType *string `json:"operationType,omitmpty"` + OperationId *string `json:"operationId,omitemty"` + PercentComplete string `json:"percentComplete,omitempty"` + TimeAccepted string `json:"timeAccepted,omitempty"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeFinished string `json:"timeFinished,omitempty"` +} + +type VmNetworkDetails struct { + VcnName *string `json:"vcnName,omitempty"` + SubnetName *string `json:"clientSubnet,omitempty"` + ScanDnsName *string `json:"scanDnsName,omitempty"` + HostName string `json:"hostName,omitempty"` + DomainName string `json:"domainName,omitempty"` + ListenerPort *int `json:"listenerPort,omitempty"` + NetworkSG string `json:"networkSG,omitempty"` +} + +// DbCloneConfig defines the configuration for the database clone +type DbCloneConfig struct { + DbAdminPasswordSecret string `json:"dbAdminPasswordSecret,omitempty"` + TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"` + DbName string `json:"dbName"` + HostName string `json:"hostName"` + DbUniqueName string `json:"dbDbUniqueName"` + DisplayName string `json:"displayName"` + LicenseModel string `json:"licenseModel,omitempty"` + Domain string `json:"domain,omitempty"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + SubnetId string 
`json:"subnetId"`
+ SidPrefix string `json:"sidPrefix,omitempty"`
+ InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"`
+ KmsKeyId string `json:"kmsKeyId,omitempty"`
+ KmsKeyVersionId string `json:"kmsKeyVersionId,omitempty"`
+ PrivateIp string `json:"privateIp,omitempty"`
+}
+
+// DbCloneStatus defines the observed state of DbClone
+type DbCloneStatus struct {
+ Id *string `json:"id,omitempty"`
+ DbAdminPaswordSecret string `json:"dbAdminPaswordSecret,omitempty"`
+ DbName string `json:"dbName,omitempty"`
+ HostName string `json:"hostName"`
+ DbUniqueName string `json:"dbDbUniqueName"`
+ DisplayName string `json:"displayName,omitempty"`
+ LicenseModel string `json:"licenseModel,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ SshPublicKeys []string `json:"sshPublicKeys,omitempty"`
+ SubnetId string `json:"subnetId,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=dbcssystems,scope=Namespaced
+// +kubebuilder:storageversion
+
+
+// DbcsSystem is the Schema for the dbcssystems API
+type DbcsSystem struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec DbcsSystemSpec `json:"spec,omitempty"`
+ Status DbcsSystemStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// DbcsSystemList contains a list of DbcsSystem
+type DbcsSystemList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []DbcsSystem `json:"items"`
+}
+
+type LifecycleState string
+
+const (
+ Available LifecycleState = "AVAILABLE"
+ Failed LifecycleState = "FAILED"
+ Update LifecycleState = "UPDATING"
+ Provision LifecycleState = "PROVISIONING"
+ Terminate LifecycleState = "TERMINATED"
+)
+
+const lastSuccessfulSpec = "lastSuccessfulSpec"
+
+// GetLastSuccessfulSpec returns spec from the last successful reconciliation.
+// Returns nil, nil if there is no lastSuccessfulSpec. +func (dbcs *DbcsSystem) GetLastSuccessfulSpec() (*DbcsSystemSpec, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulSpec] + if !ok { + return nil, nil + } + + specBytes := []byte(val) + sucSpec := DbcsSystemSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + return nil, err + } + + return &sucSpec, nil +} +func (dbcs *DbcsSystem) GetLastSuccessfulSpecWithLog(log logr.Logger) (*DbcsSystemSpec, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulSpec] + if !ok { + log.Info("No last successful spec annotation found") + return nil, nil + } + + specBytes := []byte(val) + sucSpec := DbcsSystemSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + log.Error(err, "Failed to unmarshal last successful spec") + return nil, err + } + + log.Info("Successfully retrieved last successful spec", "spec", sucSpec) + return &sucSpec, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. +func (dbcs *DbcsSystem) UpdateLastSuccessfulSpec(kubeClient client.Client) error { + specBytes, err := json.Marshal(dbcs.Spec) + if err != nil { + return err + } + + anns := map[string]string{ + lastSuccessfulSpec: string(specBytes), + } + + // return dbcsv1.SetAnnotations(kubeClient, dbcs, anns) + return dbcsv1.PatchAnnotations(kubeClient, dbcs, anns) + +} + +func init() { + SchemeBuilder.Register(&DbcsSystem{}, &DbcsSystemList{}) +} diff --git a/apis/database/v4/dbcssystem_webhook.go b/apis/database/v4/dbcssystem_webhook.go new file mode 100644 index 00000000..c3ff8ddb --- /dev/null +++ b/apis/database/v4/dbcssystem_webhook.go @@ -0,0 +1,98 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var dbcssystemlog = logf.Log.WithName("dbcssystem-resource") + +func (r *DbcsSystem) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-dbcssystem,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=dbcssystems,verbs=create;update,versions=v4,name=mdbcssystemv4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DbcsSystem{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DbcsSystem) Default() { + dbcssystemlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. + +// +kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v4-dbcssystem,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=dbcssystems,versions=v4,name=vdbcssystemv4.kb.io,admissionReviewVersions=v1 +var _ webhook.Validator = &DbcsSystem{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateCreate() (admission.Warnings, error) { + dbcssystemlog.Info("validate create", "name", r.Name) + + // // TODO(user): fill in your validation logic upon object creation. 
+ return nil, nil +} + +// // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + dbcssystemlog.Info("validate update", "name", r.Name) + + // // TODO(user): fill in your validation logic upon object update. + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateDelete() (admission.Warnings, error) { + dbcssystemlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/groupversion_info.go b/apis/database/v4/groupversion_info.go new file mode 100644 index 00000000..6644b93c --- /dev/null +++ b/apis/database/v4/groupversion_info.go @@ -0,0 +1,58 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the 
+** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Package v4 contains API Schema definitions for the database v4 API group +// +kubebuilder:object:generate=true +// +groupName=database.oracle.com +package v4 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "database.oracle.com", Version: "v4"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/database/v4/lrest_types.go b/apis/database/v4/lrest_types.go new file mode 100644 index 00000000..421a3ea1 --- /dev/null +++ b/apis/database/v4/lrest_types.go @@ -0,0 +1,191 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LRESTSpec defines the desired state of LREST +type LRESTSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Name of the LREST + LRESTName string `json:"cdbName,omitempty"` + // Name of the LREST Service + ServiceName string `json:"serviceName,omitempty"` + + // Password for the LREST System Administrator + SysAdminPwd LRESTSysAdminPassword `json:"sysAdminPwd,omitempty"` + // User in the root container with sysdba priviledges to manage PDB lifecycle + LRESTAdminUser LRESTAdminUser `json:"cdbAdminUser,omitempty"` + // Password for the LREST Administrator to manage PDB lifecycle + LRESTAdminPwd LRESTAdminPassword `json:"cdbAdminPwd,omitempty"` + + LRESTTlsKey LRESTTLSKEY `json:"cdbTlsKey,omitempty"` + LRESTTlsCrt LRESTTLSCRT `json:"cdbTlsCrt,omitempty"` + LRESTPubKey LRESTPUBKEY `json:"cdbPubKey,omitempty"` + LRESTPriKey LRESTPRVKEY `json:"cdbPrvKey,omitempty"` + + // Password for user LREST_PUBLIC_USER + LRESTPwd LRESTPassword `json:"lrestPwd,omitempty"` + // LREST server port. For now, keep it as 8888. TO BE USED IN FUTURE RELEASE. + LRESTPort int `json:"lrestPort,omitempty"` + // LREST Image Name + LRESTImage string `json:"lrestImage,omitempty"` + // The name of the image pull secret in case of a private docker repository. 
+ LRESTImagePullSecret string `json:"lrestImagePullSecret,omitempty"` + // LREST Image Pull Policy + // +kubebuilder:validation:Enum=Always;Never + LRESTImagePullPolicy string `json:"lrestImagePullPolicy,omitempty"` + // Number of LREST Containers to create + Replicas int `json:"replicas,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebLrestServerUser WebLrestServerUser `json:"webServerUser,omitempty"` + // Password for the Web Server User + WebLrestServerPwd WebLrestServerPassword `json:"webServerPwd,omitempty"` + // Name of the DB server + DBServer string `json:"dbServer,omitempty"` + // DB server port + DBPort int `json:"dbPort,omitempty"` + // Node Selector for running the Pod + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + DBTnsurl string `json:"dbTnsurl,omitempty"` + DeletePDBCascade bool `json:"deletePdbCascade,omitempty"` +} + +// LRESTSecret defines the secretName +type LRESTSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +// LRESTSysAdminPassword defines the secret containing SysAdmin Password mapped to key 'sysAdminPwd' for LREST +type LRESTSysAdminPassword struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTAdminUser defines the secret containing LREST Administrator User mapped to key 'lrestAdminUser' to manage PDB lifecycle +type LRESTAdminUser struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTAdminPassword defines the secret containing LREST Administrator Password mapped to key 'lrestAdminPwd' to manage PDB lifecycle +type LRESTAdminPassword struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTPassword defines the secret containing LREST_PUBLIC_USER Password mapped to key 'ordsPwd' +type LRESTPassword struct { + Secret LRESTSecret `json:"secret"` +} + +// WebLrestServerUser defines the secret containing Web Server User mapped to key 'webServerUser' to manage PDB lifecycle +type 
WebLrestServerUser struct { + Secret LRESTSecret `json:"secret"` +} + +// WebLrestServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle +type WebLrestServerPassword struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTTLSKEY struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTTLSCRT struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTPUBKEY struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTPRVKEY struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTStatus defines the observed state of LREST +type LRESTStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Phase of the LREST Resource + Phase string `json:"phase"` + // LREST Resource Status + Status bool `json:"status"` + // Message + Msg string `json:"msg,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB NAME",type="string",description="Name of the LREST" +// +kubebuilder:printcolumn:JSONPath=".spec.dbServer",name="DB Server",type="string",description=" Name of the DB Server" +// +kubebuilder:printcolumn:JSONPath=".spec.dbPort",name="DB Port",type="integer",description="DB server port" +// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="Replicas",type="integer",description="Replicas" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the LREST Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message if any" +// +kubebuilder:printcolumn:JSONPath=".spec.dbTnsurl",name="TNS STRING",type="string",description="string of the tnsalias" +// +kubebuilder:resource:path=lrests,scope=Namespaced +// +kubebuilder:storageversion + +// LREST is the Schema for the lrests API +type 
LREST struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LRESTSpec `json:"spec,omitempty"` + Status LRESTStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// LRESTList contains a list of LREST +type LRESTList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LREST `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LREST{}, &LRESTList{}) +} diff --git a/apis/database/v4/lrest_webhook.go b/apis/database/v4/lrest_webhook.go new file mode 100644 index 00000000..9d65a1d6 --- /dev/null +++ b/apis/database/v4/lrest_webhook.go @@ -0,0 +1,219 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var lrestlog = logf.Log.WithName("lrest-webhook") + +func (r *LREST) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-lrest,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrests,verbs=create;update,versions=v4,name=mlrest.kb.io,admissionReviewVersions={v4,v1beta1} + +var _ webhook.Defaulter = &LREST{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *LREST) Default() { + lrestlog.Info("Setting default values in LREST spec for : " + r.Name) + + if r.Spec.LRESTPort == 0 { + r.Spec.LRESTPort = 8888 + } + + if r.Spec.Replicas == 0 { + r.Spec.Replicas = 1 + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-lrest,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrests,verbs=create;update,versions=v4,name=vlrest.kb.io,admissionReviewVersions={v4,v1beta1} + +var _ webhook.Validator = &LREST{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *LREST) ValidateCreate() (admission.Warnings, error) { + lrestlog.Info("ValidateCreate", "name", r.Name) + + var allErrs field.ErrorList + + if r.Spec.ServiceName == "" && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("serviceName"), "Please specify LREST Service name")) + } + + if reflect.ValueOf(r.Spec.LRESTTlsKey).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("lrestTlsKey"), "Please specify LREST Tls key(secret)")) + } + + if reflect.ValueOf(r.Spec.LRESTTlsCrt).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("lrestTlsCrt"), "Please specify LREST Tls Certificate(secret)")) + } + + /*if r.Spec.SCANName == "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("scanName"), "Please specify SCAN Name for LREST")) + }*/ + + if 
(r.Spec.DBServer == "" && r.Spec.DBTnsurl == "") || (r.Spec.DBServer != "" && r.Spec.DBTnsurl != "") {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("dbServer"), "Please specify Database Server Name/IP Address or tnsalias string"))
+ }
+
+ if r.Spec.DBTnsurl != "" && (r.Spec.DBServer != "" || r.Spec.DBPort != 0 || r.Spec.ServiceName != "") {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("dbServer"), "DBtnsurl is orthogonal to (DBServer,DBport,Services)"))
+ }
+
+ if r.Spec.DBPort == 0 && r.Spec.DBServer != "" {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("dbPort"), "Please specify DB Server Port"))
+ }
+ if r.Spec.DBPort < 0 && r.Spec.DBServer != "" {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port"))
+ }
+ if r.Spec.LRESTPort < 0 {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("lrestPort"), "Please specify a valid LREST Port"))
+ }
+ if r.Spec.Replicas < 0 {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas"))
+ }
+ if r.Spec.LRESTImage == "" {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("lrestImage"), "Please specify name of LREST Image to be used"))
+ }
+ if reflect.ValueOf(r.Spec.LRESTAdminUser).IsZero() {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("lrestAdminUser"), "Please specify user in the root container with sysdba priviledges to manage PDB lifecycle"))
+ }
+ if reflect.ValueOf(r.Spec.LRESTAdminPwd).IsZero() {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("lrestAdminPwd"), "Please specify password for the LREST Administrator to manage PDB lifecycle"))
+ }
+ /* if reflect.ValueOf(r.Spec.LRESTPwd).IsZero() {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("ordsPwd"), "Please specify password
for user LREST_PUBLIC_USER"))
+ } */
+ if reflect.ValueOf(r.Spec.WebLrestServerUser).IsZero() {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("webServerUser"), "Please specify the Web Server User having SQL Administrator role"))
+ }
+ if reflect.ValueOf(r.Spec.WebLrestServerPwd).IsZero() {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify password for the Web Server User having SQL Administrator role"))
+ }
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "database.oracle.com", Kind: "LREST"},
+ r.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *LREST) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+ lrestlog.Info("validate update", "name", r.Name)
+
+ isLRESTMarkedToBeDeleted := r.GetDeletionTimestamp() != nil
+ if isLRESTMarkedToBeDeleted {
+ return nil, nil
+ }
+
+ var allErrs field.ErrorList
+
+ // Validate constraints on the updated spec
+ oldLREST, ok := old.(*LREST)
+ if !ok {
+ return nil, nil
+ }
+
+ if r.Spec.DBPort < 0 {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port"))
+ }
+ if r.Spec.LRESTPort < 0 {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("lrestPort"), "Please specify a valid LREST Port"))
+ }
+ if r.Spec.Replicas < 0 {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas"))
+ }
+ if !strings.EqualFold(oldLREST.Spec.ServiceName, r.Spec.ServiceName) {
+ allErrs = append(allErrs,
+ field.Forbidden(field.NewPath("spec").Child("serviceName"), "cannot be changed"))
+ }
+
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "database.oracle.com", Kind: "LREST"},
+ r.Name, allErrs)
+}
+
+//
ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *LREST) ValidateDelete() (admission.Warnings, error) { + lrestlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/lrpdb_types.go b/apis/database/v4/lrpdb_types.go new file mode 100644 index 00000000..d37bebdc --- /dev/null +++ b/apis/database/v4/lrpdb_types.go @@ -0,0 +1,256 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LRPDBSpec defines the desired state of LRPDB +type LRPDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + LRPDBTlsKey LRPDBTLSKEY `json:"lrpdbTlsKey,omitempty"` + LRPDBTlsCrt LRPDBTLSCRT `json:"lrpdbTlsCrt,omitempty"` + LRPDBTlsCat LRPDBTLSCAT `json:"lrpdbTlsCat,omitempty"` + LRPDBPriKey LRPDBPRVKEY `json:"cdbPrvKey,omitempty"` + + // Namespace of the rest server + CDBNamespace string `json:"cdbNamespace,omitempty"` + // Name of the CDB Custom Resource that runs the LREST container + CDBResName string `json:"cdbResName,omitempty"` + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // The name of the new LRPDB. Relevant for both Create and Plug Actions. + LRPDBName string `json:"pdbName,omitempty"` + // Name of the Source LRPDB from which to clone + SrcLRPDBName string `json:"srcPdbName,omitempty"` + // The administrator username for the new LRPDB. This property is required when the Action property is Create. + AdminName LRPDBAdminName `json:"adminName,omitempty"` + // The administrator password for the new LRPDB. This property is required when the Action property is Create. + AdminPwd LRPDBAdminPassword `json:"adminPwd,omitempty"` + // Relevant for Create and Plug operations. As defined in the Oracle Multitenant Database documentation. 
Values can be a filename convert pattern or NONE. + AdminpdbUser AdminpdbUser `json:"adminpdbUser,omitempty"` + AdminpdbPass AdminpdbPass `json:"adminpdbPass,omitempty"` + + FileNameConversions string `json:"fileNameConversions,omitempty"` + // This property is required when the Action property is Plug. As defined in the Oracle Multitenant Database documentation. Values can be a source filename convert pattern or NONE. + SourceFileNameConversions string `json:"sourceFileNameConversions,omitempty"` + // XML metadata filename to be used for Plug or Unplug operations + XMLFileName string `json:"xmlFileName,omitempty"` + // To copy files or not while cloning a LRPDB + // +kubebuilder:validation:Enum=COPY;NOCOPY;MOVE + CopyAction string `json:"copyAction,omitempty"` + // Specify if datafiles should be removed or not. The value can be INCLUDING or KEEP (default). + // +kubebuilder:validation:Enum=INCLUDING;KEEP + DropAction string `json:"dropAction,omitempty"` + // A Path specified for sparse clone snapshot copy. (Optional) + SparseClonePath string `json:"sparseClonePath,omitempty"` + // Whether to reuse temp file + ReuseTempFile *bool `json:"reuseTempFile,omitempty"` + // Relevant for Create and Plug operations. True for unlimited storage. Even when set to true, totalSize and tempSize MUST be specified in the request if Action is Create. + UnlimitedStorage *bool `json:"unlimitedStorage,omitempty"` + // Indicate if 'AS CLONE' option should be used in the command to plug in a LRPDB. This property is applicable when the Action property is PLUG but not required. + AsClone *bool `json:"asClone,omitempty"` + // Relevant for create and plug operations. Total size as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TotalSize string `json:"totalSize,omitempty"` + // Relevant for Create and Clone operations. 
Total size for temporary tablespace as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TempSize string `json:"tempSize,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebLrpdbServerUser WebLrpdbServerUser `json:"webServerUser,omitempty"` + // Password for the Web Server User + WebLrpdbServerPwd WebLrpdbServerPassword `json:"webServerPwd,omitempty"` + // TDE import for plug operations + LTDEImport *bool `json:"tdeImport,omitempty"` + // LTDE export for unplug operations + LTDEExport *bool `json:"tdeExport,omitempty"` + // TDE password if the tdeImport or tdeExport flag is set to true. Can be used in create, plug or unplug operations + LTDEPassword LTDEPwd `json:"tdePassword,omitempty"` + // LTDE keystore path is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. + LTDEKeystorePath string `json:"tdeKeystorePath,omitempty"` + // LTDE secret is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. + LTDESecret LTDESecret `json:"tdeSecret,omitempty"` + // Whether you need the script only or execute the script + GetScript *bool `json:"getScript,omitempty"` + // Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map/Alter. Map is used to map a Database LRPDB to a Kubernetes LRPDB CR. 
+ // +kubebuilder:validation:Enum=Create;Clone;Plug;Unplug;Delete;Modify;Status;Map;Alter;Noaction + Action string `json:"action"` + // Extra options for opening and closing a LRPDB + // +kubebuilder:validation:Enum=IMMEDIATE;NORMAL;READ ONLY;READ WRITE;RESTRICTED + ModifyOption string `json:"modifyOption,omitempty"` + // to be used with ALTER option - obsolete do not use + AlterSystem string `json:"alterSystem,omitempty"` + // to be used with ALTER option - the name of the parameter + AlterSystemParameter string `json:"alterSystemParameter"` + // to be used with ALTER option - the value of the parameter + AlterSystemValue string `json:"alterSystemValue"` + // parameter scope + ParameterScope string `json:"parameterScope,omitempty"` + // The target state of the LRPDB + // +kubebuilder:validation:Enum=OPEN;CLOSE;ALTER + LRPDBState string `json:"pdbState,omitempty"` + // turn on the assertive approach to delete pdb resource + // kubectl delete pdb ..... automatically triggers the pluggable database + // deletion + AssertiveLrpdbDeletion bool `json:"assertiveLrpdbDeletion,omitempty"` + PDBConfigMap string `json:"pdbconfigmap,omitempty"` +} + +// LRPDBAdminName defines the secret containing Sys Admin User mapped to key 'adminName' for LRPDB +type LRPDBAdminName struct { + Secret LRPDBSecret `json:"secret"` +} + +// LRPDBAdminPassword defines the secret containing Sys Admin Password mapped to key 'adminPwd' for LRPDB +type LRPDBAdminPassword struct { + Secret LRPDBSecret `json:"secret"` +} + +// TDEPwd defines the secret containing TDE Wallet Password mapped to key 'tdePassword' for LRPDB +type LTDEPwd struct { + Secret LRPDBSecret `json:"secret"` +} + +// TDESecret defines the secret containing TDE Secret to key 'tdeSecret' for LRPDB +type LTDESecret struct { + Secret LRPDBSecret `json:"secret"` +} + +type WebLrpdbServerUser struct { + Secret LRPDBSecret `json:"secret"` +} + +type WebLrpdbServerPassword struct { + Secret LRPDBSecret `json:"secret"` +} + +type 
AdminpdbUser struct { + Secret LRPDBSecret `json:"secret"` +} + +type AdminpdbPass struct { + Secret LRPDBSecret `json:"secret"` +} + +// LRPDBSecret defines the secretName +type LRPDBSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +type LRPDBTLSKEY struct { + Secret LRPDBSecret `json:"secret"` +} + +type LRPDBTLSCRT struct { + Secret LRPDBSecret `json:"secret"` +} + +type LRPDBTLSCAT struct { + Secret LRPDBSecret `json:"secret"` +} + +type LRPDBPRVKEY struct { + Secret LRPDBSecret `json:"secret"` +} + +// LRPDBStatus defines the observed state of LRPDB +type LRPDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // LRPDB Connect String + ConnString string `json:"connString,omitempty"` + // Phase of the LRPDB Resource + Phase string `json:"phase"` + // LRPDB Resource Status + Status bool `json:"status"` + // Total size of the LRPDB + TotalSize string `json:"totalSize,omitempty"` + // Open mode of the LRPDB + OpenMode string `json:"openMode,omitempty"` + // Modify Option of the LRPDB + ModifyOption string `json:"modifyOption,omitempty"` + // Message + Msg string `json:"msg,omitempty"` + // Last Completed Action + Action string `json:"action,omitempty"` + // Last Completed alter system + AlterSystem string `json:"alterSystem,omitempty"` + // Last ORA- + SqlCode int `json:"sqlCode"` + Bitstat int `json:"bitstat,omitempty"` /* Bitmask */ + BitStatStr string `json:"bitstatstr,omitempty"` /* Decoded bitmask */ +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" +// +kubebuilder:printcolumn:JSONPath=".spec.pdbName",name="PDB Name",type="string",description="Name of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.openMode",name="PDB State",type="string",description="PDB Open Mode" 
+// +kubebuilder:printcolumn:JSONPath=".status.totalSize",name="PDB Size",type="string",description="Total Size of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the LRPDB Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".status.sqlCode",name="last sqlcode",type="integer",description="last sqlcode" +// +kubebuilder:printcolumn:JSONPath=".status.connString",name="Connect_String",type="string",description="The connect string to be used" +// +kubebuilder:resource:path=lrpdbs,scope=Namespaced +// +kubebuilder:storageversion + +// LRPDB is the Schema for the pdbs API +type LRPDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LRPDBSpec `json:"spec,omitempty"` + Status LRPDBStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// LRPDBList contains a list of LRPDB +type LRPDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LRPDB `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LRPDB{}, &LRPDBList{}) +} diff --git a/apis/database/v4/lrpdb_webhook.go b/apis/database/v4/lrpdb_webhook.go new file mode 100644 index 00000000..d6807926 --- /dev/null +++ b/apis/database/v4/lrpdb_webhook.go @@ -0,0 +1,370 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +/* MODIFIED (MM/DD/YY) +** rcitton 07/14/22 - 33822886 + */ + +package v4 + +import ( + "context" + "fmt" + "reflect" + "strconv" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var lrpdblog = logf.Log.WithName("lrpdb-webhook") + +func (r *LRPDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + WithValidator(&LRPDB{}). + WithDefaulter(&LRPDB{}). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-lrpdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrpdbs,verbs=create;update,versions=v4,name=mlrpdb.kb.io,admissionReviewVersions={v4,v1beta1} + +var _ webhook.CustomDefaulter = &LRPDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *LRPDB) Default(ctx context.Context, obj runtime.Object) error { + pdb, ok := obj.(*LRPDB) + if !ok { + return fmt.Errorf("expected an LRPDB object but got %T", obj) + } + lrpdblog.Info("Setting default values in LRPDB spec for : " + pdb.Name) + + action := strings.ToUpper(pdb.Spec.Action) + + if action == "DELETE" { + if pdb.Spec.DropAction == "" { + pdb.Spec.DropAction = "KEEP" + lrpdblog.Info(" - dropAction : KEEP") + } + } else if action != "MODIFY" && action != "STATUS" { + if pdb.Spec.ReuseTempFile == nil { + pdb.Spec.ReuseTempFile = new(bool) + *pdb.Spec.ReuseTempFile = true + lrpdblog.Info(" - reuseTempFile : " + strconv.FormatBool(*(pdb.Spec.ReuseTempFile))) + } + if pdb.Spec.UnlimitedStorage == nil { + pdb.Spec.UnlimitedStorage = new(bool) + *pdb.Spec.UnlimitedStorage = true + 
lrpdblog.Info(" - unlimitedStorage : " + strconv.FormatBool(*(pdb.Spec.UnlimitedStorage))) + } + if pdb.Spec.LTDEImport == nil { + pdb.Spec.LTDEImport = new(bool) + *pdb.Spec.LTDEImport = false + lrpdblog.Info(" - tdeImport : " + strconv.FormatBool(*(pdb.Spec.LTDEImport))) + } + if pdb.Spec.LTDEExport == nil { + pdb.Spec.LTDEExport = new(bool) + *pdb.Spec.LTDEExport = false + lrpdblog.Info(" - tdeExport : " + strconv.FormatBool(*(pdb.Spec.LTDEExport))) + } + if pdb.Spec.AsClone == nil { + pdb.Spec.AsClone = new(bool) + *pdb.Spec.AsClone = false + lrpdblog.Info(" - asClone : " + strconv.FormatBool(*(pdb.Spec.AsClone))) + } + } + + if pdb.Spec.GetScript == nil { + pdb.Spec.GetScript = new(bool) + *pdb.Spec.GetScript = false + lrpdblog.Info(" - getScript : " + strconv.FormatBool(*(pdb.Spec.GetScript))) + } + return nil +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-lrpdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrpdbs,verbs=create;update,versions=v4,name=vlrpdb.kb.io,admissionReviewVersions={v4,v1beta1} + +var _ webhook.CustomValidator = &LRPDB{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *LRPDB) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + lrpdblog.Info("ValidateCreate-Validating LRPDB spec for : " + r.Name) + pdb := obj.(*LRPDB) + + var allErrs field.ErrorList + + r.validateCommon(&allErrs, ctx, *pdb) + + r.validateAction(&allErrs, ctx, *pdb) + + action := strings.ToUpper(pdb.Spec.Action) + + if len(allErrs) == 0 { + lrpdblog.Info("LRPDB Resource : " + r.Name + " successfully validated for Action : " + action) + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "LRPDB"}, + r.Name, allErrs) + return nil, nil +} + +// Validate Action for 
required parameters +func (r *LRPDB) validateAction(allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) { + action := strings.ToUpper(pdb.Spec.Action) + + lrpdblog.Info("Valdiating LRPDB Resource Action : " + action) + + if reflect.ValueOf(pdb.Spec.LRPDBTlsKey).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("lrpdbTlsKey"), "Please specify LRPDB Tls Key(secret)")) + } + + if reflect.ValueOf(pdb.Spec.LRPDBTlsCrt).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("lrpdbTlsCrt"), "Please specify LRPDB Tls Certificate(secret)")) + } + + if reflect.ValueOf(pdb.Spec.LRPDBTlsCat).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("lrpdbTlsCat"), "Please specify LRPDB Tls Certificate Authority(secret)")) + } + + switch action { + case "DELETE": + /* BUG 36752336 - LREST OPERATOR - DELETE NON-EXISTENT PDB SHOWS LRPDB CREATED MESSAGE */ + if pdb.Status.OpenMode == "READ WRITE" { + lrpdblog.Info("Cannot delete: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+pdb.Spec.LRPDBName+" "+pdb.Status.OpenMode)) + } + r.CheckObjExistence("DELETE", allErrs, ctx, pdb) + case "CREATE": + if reflect.ValueOf(pdb.Spec.AdminpdbUser).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("adminpdbUser"), "Please specify LRPDB System Administrator user")) + } + if reflect.ValueOf(pdb.Spec.AdminpdbPass).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("adminpdbPass"), "Please specify LRPDB System Administrator Password")) + } + if pdb.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. 
Values can be a filename convert pattern or NONE")) + } + if pdb.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if pdb.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + if *(pdb.Spec.LTDEImport) { + r.validateTDEInfo(allErrs, ctx, pdb) + } + case "CLONE": + // Sample Err: The LRPDB "lrpdb1-clone" is invalid: spec.srcPdbName: Required value: Please specify source LRPDB for Cloning + if pdb.Spec.SrcLRPDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("srcPdbName"), "Please specify source LRPDB name for Cloning")) + } + if pdb.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if pdb.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + if pdb.Status.OpenMode == "MOUNT" { + lrpdblog.Info("Cannot clone: pdb is mount ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+pdb.Spec.LRPDBName+" "+pdb.Status.OpenMode)) + } + case "PLUG": + if pdb.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if pdb.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. 
Values can be a filename convert pattern or NONE")) + } + if pdb.Spec.SourceFileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("sourceFileNameConversions"), "Please specify a value for sourceFileNameConversions. Values can be a filename convert pattern or NONE")) + } + if pdb.Spec.CopyAction == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("copyAction"), "Please specify a value for copyAction. Values can be COPY, NOCOPY or MOVE")) + } + if *(pdb.Spec.LTDEImport) { + r.validateTDEInfo(allErrs, ctx, pdb) + } + case "UNPLUG": + if pdb.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if *(pdb.Spec.LTDEExport) { + r.validateTDEInfo(allErrs, ctx, pdb) + } + if pdb.Status.OpenMode == "READ WRITE" { + lrpdblog.Info("Cannot unplug: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+pdb.Spec.LRPDBName+" "+pdb.Status.OpenMode)) + } + r.CheckObjExistence("UNPLUG", allErrs, ctx, pdb) + case "MODIFY": + + if pdb.Spec.LRPDBState == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("lrpdbState"), "Please specify target state of LRPDB")) + } + if pdb.Spec.ModifyOption == "" && pdb.Spec.AlterSystem == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("modifyOption"), "Please specify an option for opening/closing a LRPDB or alter system parameter")) + } + r.CheckObjExistence("MODIFY", allErrs, ctx, pdb) + } +} + +func (r *LRPDB) CheckObjExistence(action string, allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) { + /* BUG 36752465 - lrest operator - open non-existent pdb creates a lrpdb with status failed */ + lrpdblog.Info("Action [" + action + "] checkin " + pdb.Spec.LRPDBName + " existence") + if pdb.Status.OpenMode == "" { + *allErrs = 
append(*allErrs, field.NotFound(field.NewPath("Spec").Child("LRPDBName"), " "+pdb.Spec.LRPDBName+" does not exist : action "+action+" failure")) + + } +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *LRPDB) ValidateUpdate(ctx context.Context, obj runtime.Object, old runtime.Object) (admission.Warnings, error) { + lrpdblog.Info("ValidateUpdate-Validating LRPDB spec for : " + r.Name) + pdb := old.(*LRPDB) + + isLRPDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil + if isLRPDBMarkedToBeDeleted { + return nil, nil + } + + var allErrs field.ErrorList + action := strings.ToUpper(pdb.Spec.Action) + + // If LRPDB CR has been created and in Ready state, only allow updates if the "action" value has changed as well + if (pdb.Status.Phase == "Ready") && (pdb.Status.Action != "MODIFY") && (pdb.Status.Action != "STATUS") && (pdb.Status.Action != "NOACTION") && (pdb.Status.Action == action) { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("action"), "New action also needs to be specified after LRPDB is in Ready state")) + } else { + + // Check Common Validations + r.validateCommon(&allErrs, ctx, *pdb) + + // Validate required parameters for Action specified + r.validateAction(&allErrs, ctx, *pdb) + + // Check TDE requirements + if (action != "DELETE") && (action != "MODIFY") && (action != "STATUS") && (*(pdb.Spec.LTDEImport) || *(pdb.Spec.LTDEExport)) { + r.validateTDEInfo(&allErrs, ctx, *pdb) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "LRPDB"}, + r.Name, allErrs) + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *LRPDB) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + lrpdblog.Info("ValidateDelete-Validating LRPDB spec for : " + r.Name) + + // TODO(user): fill in your 
validation logic upon object deletion. + return nil, nil +} + +// Validate common specs needed for all LRPDB Actions +func (r *LRPDB) validateCommon(allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) { + lrpdblog.Info("validateCommon", "name", pdb.Name) + + if pdb.Spec.Action == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("action"), "Please specify LRPDB operation to be performed")) + } + if pdb.Spec.CDBResName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("cdbResName"), "Please specify the name of the CDB Kubernetes resource to use for LRPDB operations")) + } + if pdb.Spec.CDBNamespace == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("cdbNamespace"), "Please specify the namespace of the rest server to use for LRPDB operations")) + } + if pdb.Spec.LRPDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("lrpdbName"), "Please specify name of the LRPDB to be created")) + } +} + +// Validate TDE information for Create, Plug and Unplug Actions +func (r *LRPDB) validateTDEInfo(allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) { + lrpdblog.Info("validateTDEInfo", "name", r.Name) + + if reflect.ValueOf(pdb.Spec.LTDEPassword).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdePassword"), "Please specify a value for tdePassword.")) + } + if pdb.Spec.LTDEKeystorePath == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeKeystorePath"), "Please specify a value for tdeKeystorePath.")) + } + if reflect.ValueOf(pdb.Spec.LTDESecret).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeSecret"), "Please specify a value for tdeSecret.")) + } + +} diff --git a/apis/database/v4/oraclerestdataservice_conversion.go b/apis/database/v4/oraclerestdataservice_conversion.go new file mode 100644 index 00000000..a19cdfd5 --- 
/dev/null +++ b/apis/database/v4/oraclerestdataservice_conversion.go @@ -0,0 +1,4 @@ +package v4 + +// Hub defines v1 as the hub version +func (*OracleRestDataService) Hub() {} diff --git a/apis/database/v4/oraclerestdataservice_types.go b/apis/database/v4/oraclerestdataservice_types.go new file mode 100644 index 00000000..20cc7a74 --- /dev/null +++ b/apis/database/v4/oraclerestdataservice_types.go @@ -0,0 +1,158 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// OracleRestDataServiceSpec defines the desired state of OracleRestDataService +type OracleRestDataServiceSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + DatabaseRef string `json:"databaseRef"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Image OracleRestDataServiceImage `json:"image,omitempty"` + OrdsPassword OracleRestDataServicePassword `json:"ordsPassword"` + AdminPassword OracleRestDataServicePassword `json:"adminPassword"` + OrdsUser string `json:"ordsUser,omitempty"` + RestEnableSchemas []OracleRestDataServiceRestEnableSchemas `json:"restEnableSchemas,omitempty"` + OracleService string `json:"oracleService,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + Persistence OracleRestDataServicePersistence `json:"persistence,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` + + // +k8s:openapi-gen=true + // +kubebuilder:validation:Minimum=1 + Replicas int `json:"replicas,omitempty"` + ReadinessCheckPeriod int 
`json:"readinessCheckPeriod,omitempty"` +} + +// OracleRestDataServicePersistence defines the storage related params +type OracleRestDataServicePersistence struct { + Size string `json:"size,omitempty"` + StorageClass string `json:"storageClass,omitempty"` + + // +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany + AccessMode string `json:"accessMode,omitempty"` + VolumeName string `json:"volumeName,omitempty"` + SetWritePermissions *bool `json:"setWritePermissions,omitempty"` +} + +// OracleRestDataServiceImage defines the Image source and pullSecrets for POD +type OracleRestDataServiceImage struct { + Version string `json:"version,omitempty"` + PullFrom string `json:"pullFrom"` + PullSecrets string `json:"pullSecrets,omitempty"` +} + +// OracleRestDataServicePassword defines the secret containing Password mapped to secretKey +type OracleRestDataServicePassword struct { + SecretName string `json:"secretName"` + // +kubebuilder:default:="oracle_pwd" + SecretKey string `json:"secretKey,omitempty"` + KeepSecret *bool `json:"keepSecret,omitempty"` +} + +// OracleRestDataServiceRestEnableSchemas defines the PDB Schemas to be ORDS Enabled +type OracleRestDataServiceRestEnableSchemas struct { + PdbName string `json:"pdbName,omitempty"` + SchemaName string `json:"schemaName"` + UrlMapping string `json:"urlMapping,omitempty"` + Enable bool `json:"enable"` +} + +// OracleRestDataServiceStatus defines the observed state of OracleRestDataService +type OracleRestDataServiceStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + Status string `json:"status,omitempty"` + DatabaseApiUrl string `json:"databaseApiUrl,omitempty"` + LoadBalancer string `json:"loadBalancer,omitempty"` + DatabaseRef string `json:"databaseRef,omitempty"` + ServiceIP string `json:"serviceIP,omitempty"` + DatabaseActionsUrl string `json:"databaseActionsUrl,omitempty"` + MongoDbApiAccessUrl string 
`json:"mongoDbApiAccessUrl,omitempty"` + OrdsInstalled bool `json:"ordsInstalled,omitempty"` + ApexConfigured bool `json:"apexConfigured,omitempty"` + ApxeUrl string `json:"apexUrl,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` + CommonUsersCreated bool `json:"commonUsersCreated,omitempty"` + Replicas int `json:"replicas,omitempty"` + + Image OracleRestDataServiceImage `json:"image,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.databaseRef",name="Database",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.databaseApiUrl",name="Database API URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.databaseActionsUrl",name="Database Actions URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.apexUrl",name="Apex URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.mongoDbApiAccessUrl",name="MongoDbApi Access URL",type="string" + +// OracleRestDataService is the Schema for the oraclerestdataservices API +// +kubebuilder:storageversion +type OracleRestDataService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OracleRestDataServiceSpec `json:"spec,omitempty"` + Status OracleRestDataServiceStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// OracleRestDataServiceList contains a list of OracleRestDataService +type OracleRestDataServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OracleRestDataService `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OracleRestDataService{}, &OracleRestDataServiceList{}) +} diff --git a/apis/database/v4/oraclerestdataservice_webhook.go b/apis/database/v4/oraclerestdataservice_webhook.go new file mode 100644 index 00000000..5211528a --- /dev/null +++ 
b/apis/database/v4/oraclerestdataservice_webhook.go @@ -0,0 +1,55 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var oraclerestdataservicelog = logf.Log.WithName("oraclerestdataservice-resource") + +func (r *OracleRestDataService) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/apis/database/v4/ordssrvs_types.go b/apis/database/v4/ordssrvs_types.go new file mode 100644 index 00000000..1fbf820a --- /dev/null +++ b/apis/database/v4/ordssrvs_types.go @@ -0,0 +1,693 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer 
for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OrdsSrvsSpec defines the desired state of OrdsSrvs +// +kubebuilder:resource:shortName="ords" +type OrdsSrvsSpec struct { + // Specifies the desired Kubernetes Workload + //+kubebuilder:validation:Enum=Deployment;StatefulSet;DaemonSet + //+kubebuilder:default=Deployment + WorkloadType string `json:"workloadType,omitempty"` + // Defines the number of desired Replicas when workloadType is Deployment or StatefulSet + //+kubebuilder:validation:Minimum=1 + //+kubebuilder:default=1 + Replicas int32 `json:"replicas,omitempty"` + // Specifies whether to restart pods when Global or Pool configurations change + ForceRestart bool `json:"forceRestart,omitempty"` + // Specifies the ORDS container image + //+kubebuilder:default=container-registry.oracle.com/database/ords:latest + Image string `json:"image"` + // Specifies the ORDS container image pull policy + //+kubebuilder:validation:Enum=IfNotPresent;Always;Never + //+kubebuilder:default=IfNotPresent + 
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + // Specifies the Secret Name for pulling the ORDS container image + ImagePullSecrets string `json:"imagePullSecrets,omitempty"` + // Contains settings that are configured across the entire ORDS instance. + GlobalSettings GlobalSettings `json:"globalSettings"` + // Contains settings for individual pools/databases + // Private key + EncPrivKey PasswordSecret `json:"encPrivKey,omitempty"` + PoolSettings []*PoolSettings `json:"poolSettings,omitempty"` + // +k8s:openapi-gen=true + +} + +type GlobalSettings struct { + // Specifies the setting to enable or disable metadata caching. + CacheMetadataEnabled *bool `json:"cache.metadata.enabled,omitempty"` + + // Specifies the duration after a GraphQL schema is not accessed from the cache that it expires. + CacheMetadataGraphQLExpireAfterAccess *time.Duration `json:"cache.metadata.graphql.expireAfterAccess,omitempty"` + + // Specifies the duration after a GraphQL schema is cached that it expires and has to be loaded again. + CacheMetadataGraphQLExpireAfterWrite *time.Duration `json:"cache.metadata.graphql.expireAfterWrite,omitempty"` + + // Specifies the setting to determine for how long a metadata record remains in the cache. + // Longer duration means, it takes longer to view the applied changes. + // The formats accepted are based on the ISO-8601 duration format. + CacheMetadataTimeout *time.Duration `json:"cache.metadata.timeout,omitempty"` + + // Specifies the setting to enable or disable JWKS caching. + CacheMetadataJWKSEnabled *bool `json:"cache.metadata.jwks.enabled,omitempty"` + + // Specifies the initial capacity of the JWKS cache. + CacheMetadataJWKSInitialCapacity *int32 `json:"cache.metadata.jwks.initialCapacity,omitempty"` + + // Specifies the maximum capacity of the JWKS cache. 
+ CacheMetadataJWKSMaximumSize *int32 `json:"cache.metadata.jwks.maximumSize,omitempty"` + + // Specifies the duration after a JWK is not accessed from the cache that it expires. + // By default this is disabled. + CacheMetadataJWKSExpireAfterAccess *time.Duration `json:"cache.metadata.jwks.expireAfterAccess,omitempty"` + + // Specifies the duration after a JWK is cached, that is, it expires and has to be loaded again. + CacheMetadataJWKSExpireAfterWrite *time.Duration `json:"cache.metadata.jwks.expireAfterWrite,omitempty"` + + // Specifies whether the Database API is enabled. + DatabaseAPIEnabled *bool `json:"database.api.enabled,omitempty"` + + // Specifies to disable the Database API administration related services. + // Only applicable when Database API is enabled. + DatabaseAPIManagementServicesDisabled *bool `json:"database.api.management.services.disabled,omitempty"` + + // Specifies how long to wait before retrying an invalid pool. + DBInvalidPoolTimeout *time.Duration `json:"db.invalidPoolTimeout,omitempty"` + + // Specifies the maximum join nesting depth limit for GraphQL queries. + FeatureGraphQLMaxNestingDepth *int32 `json:"feature.grahpql.max.nesting.depth,omitempty"` + + // Specifies the name of the HTTP request header that uniquely identifies the request end to end as + // it passes through the various layers of the application stack. + // In Oracle this header is commonly referred to as the ECID (Entity Context ID). + RequestTraceHeaderName string `json:"request.traceHeaderName,omitempty"` + + // Specifies the maximum number of unsuccessful password attempts allowed. + // Enabled by setting a positive integer value. + SecurityCredentialsAttempts *int32 `json:"security.credentials.attempts,omitempty"` + + // Specifies the period to lock the account that has exceeded maximum attempts. + SecurityCredentialsLockTime *time.Duration `json:"security.credentials.lock.time,omitempty"` + + // Specifies the HTTP listen port. 
+ //+kubebuilder:default:=8080 + StandaloneHTTPPort *int32 `json:"standalone.http.port,omitempty"` + + // Specifies the SSL certificate hostname. + StandaloneHTTPSHost string `json:"standalone.https.host,omitempty"` + + // Specifies the HTTPS listen port. + //+kubebuilder:default:=8443 + StandaloneHTTPSPort *int32 `json:"standalone.https.port,omitempty"` + + // Specifies the period for Standalone Mode to wait until it is gracefully shutdown. + StandaloneStopTimeout *time.Duration `json:"standalone.stop.timeout,omitempty"` + + // Specifies whether to display error messages on the browser. + DebugPrintDebugToScreen *bool `json:"debug.printDebugToScreen,omitempty"` + + // Specifies how the HTTP error responses must be formatted. + // html - Force all responses to be in HTML format + // json - Force all responses to be in JSON format + // auto - Automatically determines most appropriate format for the request (default). + ErrorResponseFormat string `json:"error.responseFormat,omitempty"` + + // Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. + // Either icap.port or icap.secure.port are required to have a value. + ICAPPort *int32 `json:"icap.port,omitempty"` + + // Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. + // Either icap.port or icap.secure.port are required to have a value. + // If values for both icap.port and icap.secure.port are provided, then the value of icap.port is ignored. + ICAPSecurePort *int32 `json:"icap.secure.port,omitempty"` + + // Specifies the Internet Content Adaptation Protocol (ICAP) server name or IP address to virus scan files. + // The icap.server is required to have a value. + ICAPServer string `json:"icap.server,omitempty"` + + // Specifies whether procedures are to be logged. + LogProcedure bool `json:"log.procedure,omitempty"` + + // Specifies to enable the API for MongoDB. 
+ //+kubebuilder:default=false + MongoEnabled bool `json:"mongo.enabled,omitempty"` + + // Specifies the API for MongoDB listen port. + //+kubebuilder:default:=27017 + MongoPort *int32 `json:"mongo.port,omitempty"` + + // Specifies the maximum idle time for a Mongo connection in milliseconds. + MongoIdleTimeout *time.Duration `json:"mongo.idle.timeout,omitempty"` + + // Specifies the maximum time for a Mongo database operation in milliseconds. + MongoOpTimeout *time.Duration `json:"mongo.op.timeout,omitempty"` + + // If this value is set to true, then the Oracle REST Data Services internal exclusion list is not enforced. + // Oracle recommends that you do not set this value to true. + SecurityDisableDefaultExclusionList *bool `json:"security.disableDefaultExclusionList,omitempty"` + + // Specifies a pattern for procedures, packages, or schema names which are forbidden to be directly executed from a browser. + SecurityExclusionList string `json:"security.exclusionList,omitempty"` + + // Specifies a pattern for procedures, packages, or schema names which are allowed to be directly executed from a browser. + SecurityInclusionList string `json:"security.inclusionList,omitempty"` + + // Specifies the maximum number of cached procedure validations. + // Set this value to 0 to force the validation procedure to be invoked on each request. + SecurityMaxEntries *int32 `json:"security.maxEntries,omitempty"` + + // Specifies whether HTTPS is available in your environment. + SecurityVerifySSL *bool `json:"security.verifySSL,omitempty"` + + // Specifies the context path where ords is located. 
+ //+kubebuilder:default:="/ords" + StandaloneContextPath string `json:"standalone.context.path,omitempty"` + + /************************************************* + * Undocumented + /************************************************/ + + // Specifies that the HTTP Header contains the specified text + // Usually set to 'X-Forwarded-Proto: https' coming from a load-balancer + SecurityHTTPSHeaderCheck string `json:"security.httpsHeaderCheck,omitempty"` + + // Specifies to force HTTPS; this is set to default to false as in real-world TLS should + // terminate at the LoadBalancer + SecurityForceHTTPS bool `json:"security.forceHTTPS,omitempty"` + + // Specifies to trust Access from originating domains + SecuirtyExternalSessionTrustedOrigins string `json:"security.externalSessionTrustedOrigins,omitempty"` + + /************************************************* + * Customised + /************************************************/ + /* Below are settings with physical path/file locations to be replaced by ConfigMaps/Secrets, Boolean or HardCoded */ + + /* + // Specifies the path to the folder to store HTTP request access logs. + // If not specified, then no access log is generated. + // HARDCODED + // StandaloneAccessLog string `json:"standalone.access.log,omitempty"` + */ + + // Specifies if HTTP request access logs should be enabled + // If enabled, logs will be written to /opt/oracle/sa/log/global + //+kubebuilder:default:=false + EnableStandaloneAccessLog bool `json:"enable.standalone.access.log,omitempty"` + + // Specifies if API for MongoDB access logs should be enabled + // If enabled, logs will be written to /opt/oracle/sa/log/global + //+kubebuilder:default:=false + EnableMongoAccessLog bool `json:"enable.mongo.access.log,omitempty"` + + /* + //Specifies the SSL certificate path. + // If you are providing the SSL certificate, then you must specify the certificate location. 
+ // Replaced with: CertSecret *CertificateSecret `json:"certSecret,omitempty"` + //StandaloneHTTPSCert string `json:"standalone.https.cert"` + + // Specifies the SSL certificate key path. + // If you are providing the SSL certificate, you must specify the certificate key location. + // Replaced with: CertSecret *CertificateSecret `json:"certSecret,omitempty"` + //StandaloneHTTPSCertKey string `json:"standalone.https.cert.key"` + */ + + // Specifies the Secret containing the SSL Certificates + // Replaces: standalone.https.cert and standalone.https.cert.key + CertSecret *CertificateSecret `json:"certSecret,omitempty"` + + /************************************************* + * Disabled + /************************************************* + // Specifies the comma separated list of host names or IP addresses to identify a specific network + // interface on which to listen. + //+kubebuilder:default:="0.0.0.0" + //StandaloneBinds string `json:"standalone.binds,omitempty"` + // This is disabled as containerised + + // Specifies the file where credentials are stored. + //SecurityCredentialsFile string `json:"security.credentials.file,omitempty"` + // WTF does this do?!?! + + // Points to the location where static resources to be served under the / root server path are located. + // StandaloneDocRoot string `json:"standalone.doc.root,omitempty"` + // Maybe this gets implemented; difficult to predict valid use case + + // Specifies the path to a folder that contains the custom error page. + // ErrorExternalPath string `json:"error.externalPath,omitempty"` + // Can see use-case; but wait for implementation + + // Specifies the Context path where APEX static resources are located. + //+kubebuilder:default:="/i" + // StandaloneStaticContextPath string `json:"standalone.static.context.path,omitempty"` + // Does anyone ever change this? 
If so, need to also change the APEX install configmap to update path + */ + + // Specifies the path to the folder containing static resources required by APEX. + // StandaloneStaticPath string `json:"standalone.static.path,omitempty"` + // This is disabled as will use the container image path (/opt/oracle/apex/$ORDS_VER/images) + // HARDCODED into the entrypoint + + // Specifies a comma separated list of host names or IP addresses to identify a specific + // network interface on which to listen. + //+kubebuilder:default:="0.0.0.0" + // MongoHost string `json:"mongo.host,omitempty"` + // This is disabled as containerised + + // Specifies the path to the folder where you want to store the API for MongoDB access logs. + // MongoAccessLog string `json:"mongo.access.log,omitempty"` + // HARDCODED to global/logs +} + +type PoolSettings struct { + // Specifies the Pool Name + PoolName string `json:"poolName"` + + // Specify whether to perform ORDS installation/upgrades automatically + // The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored + // This setting will be ignored for ADB + //+kubebuilder:default:=false + AutoUpgradeORDS bool `json:"autoUpgradeORDS,omitempty"` + + // Specify whether to perform APEX installation/upgrades automatically + // The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored + // This setting will be ignored for ADB + //+kubebuilder:default:=false + AutoUpgradeAPEX bool `json:"autoUpgradeAPEX,omitempty"` + + // Specifies the name of the database user for the connection. + // For non-ADB this will default to ORDS_PUBLIC_USER + // For ADBs this must be specified and not ORDS_PUBLIC_USER + // If ORDS_PUBLIC_USER is specified for an ADB, the workload will fail + //+kubebuilder:default:="ORDS_PUBLIC_USER" + DBUsername string `json:"db.username,omitempty"` + + // Specifies the password of the specified database user. 
+ // Replaced by: DBSecret PasswordSecret `json:"dbSecret"` + // DBPassword struct{} `json:"dbPassword,omitempty"` + + // Specifies the Secret with the dbUsername and dbPassword values + // for the connection. + DBSecret PasswordSecret `json:"db.secret"` + + // Specifies the username for the database account that ORDS uses for administration operations in the database. + DBAdminUser string `json:"db.adminUser,omitempty"` + + // Specifies the password for the database account that ORDS uses for administration operations in the database. + // Replaced by: DBAdminUserSecret PasswordSecret `json:"dbAdminUserSecret,omitempty"` + // DBAdminUserPassword struct{} `json:"db.adminUser.password,omitempty"` + + // Specifies the Secret with the dbAdminUser (SYS) and dbAdminPassword values + // for the database account that ORDS uses for administration operations in the database. + // replaces: db.adminUser.password + DBAdminUserSecret PasswordSecret `json:"db.adminUser.secret,omitempty"` + + // Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. + DBCDBAdminUser string `json:"db.cdb.adminUser,omitempty"` + + // Specifies the password for the database account that ORDS uses for the Pluggable Database Lifecycle Management. + // Replaced by: DBCdbAdminUserSecret PasswordSecret `json:"dbCdbAdminUserSecret,omitempty"` + // DBCdbAdminUserPassword struct{} `json:"db.cdb.adminUser.password,omitempty"` + + // Specifies the Secret with the dbCdbAdminUser (SYS) and dbCdbAdminPassword values + // Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. + // Replaces: db.cdb.adminUser.password + DBCDBAdminUserSecret PasswordSecret `json:"db.cdb.adminUser.secret,omitempty"` + + // Specifies the comma delimited list of additional roles to assign authenticated APEX administrator type users. 
+ ApexSecurityAdministratorRoles string `json:"apex.security.administrator.roles,omitempty"` + + // Specifies the comma delimited list of additional roles to assign authenticated regular APEX users. + ApexSecurityUserRoles string `json:"apex.security.user.roles,omitempty"` + + // Specifies the source for database credentials when creating a direct connection for running SQL statements. + // Value can be one of pool or request. + // If the value is pool, then the credentials defined in this pool is used to create a JDBC connection. + // If the value request is used, then the credentials in the request is used to create a JDBC connection and if successful, grants the requestor SQL Developer role. + //+kubebuilder:validation:Enum=pool;request + DBCredentialsSource string `json:"db.credentialsSource,omitempty"` + + // Indicates how long to wait to gracefully destroy a pool before moving to forcefully destroy all connections including borrowed ones. + DBPoolDestroyTimeout *time.Duration `json:"db.poolDestroyTimeout,omitempty"` + + // Specifies to enable tracking of JDBC resources. + // If not released causes in resource leaks or exhaustion in the database. + // Tracking imposes a performance overhead. + DebugTrackResources *bool `json:"debug.trackResources,omitempty"` + + // Specifies to disable the Open Service Broker services available for the pool. + FeatureOpenservicebrokerExclude *bool `json:"feature.openservicebroker.exclude,omitempty"` + + // Specifies to enable the Database Actions feature. + FeatureSDW *bool `json:"feature.sdw,omitempty"` + + // Specifies a comma separated list of HTTP Cookies to exclude when initializing an Oracle Web Agent environment. + HttpCookieFilter string `json:"http.cookie.filter,omitempty"` + + // Identifies the database role that indicates that the database user must get the SQL Administrator role. 
+ JDBCAuthAdminRole string `json:"jdbc.auth.admin.role,omitempty"` + + // Specifies how a pooled JDBC connection and corresponding database session, is released when a request has been processed. + JDBCCleanupMode string `json:"jdbc.cleanup.mode,omitempty"` + + // If it is true, then it causes a trace of the SQL statements performed by Oracle Web Agent to be echoed to the log. + OwaTraceSql *bool `json:"owa.trace.sql,omitempty"` + + // Indicates if the PL/SQL Gateway functionality should be available for a pool or not. + // Value can be one of disabled, direct, or proxied. + // If the value is direct, then the pool serves the PL/SQL Gateway requests directly. + // If the value is proxied, the PLSQL_GATEWAY_CONFIG view is used to determine the user to whom to proxy. + //+kubebuilder:validation:Enum=disabled;direct;proxied + PlsqlGatewayMode string `json:"plsql.gateway.mode,omitempty"` + + // Specifies whether the JWT Profile authentication is available. Supported values: + SecurityJWTProfileEnabled *bool `json:"security.jwt.profile.enabled,omitempty"` + + // Specifies the maximum number of bytes read from the JWK url. + SecurityJWKSSize *int32 `json:"security.jwks.size,omitempty"` + + // Specifies the maximum amount of time before timing-out when accessing a JWK url. + SecurityJWKSConnectionTimeout *time.Duration `json:"security.jwks.connection.timeout,omitempty"` + + // Specifies the maximum amount of time reading a response from the JWK url before timing-out. + SecurityJWKSReadTimeout *time.Duration `json:"security.jwks.read.timeout,omitempty"` + + // Specifies the minimum interval between refreshing the JWK cached value. + SecurityJWKSRefreshInterval *time.Duration `json:"security.jwks.refresh.interval,omitempty"` + + // Specifies the maximum skew the JWT time claims are accepted. + // This is useful if the clock on the JWT issuer and ORDS differs by a few seconds. 
+ SecurityJWTAllowedSkew *time.Duration `json:"security.jwt.allowed.skew,omitempty"` + + // Specifies the maximum allowed age of a JWT in seconds, regardless of expired claim. + // The age of the JWT is taken from the JWT issued at claim. + SecurityJWTAllowedAge *time.Duration `json:"security.jwt.allowed.age,omitempty"` + + // Indicates the type of security.requestValidationFunction: javascript or plsql. + //+kubebuilder:validation:Enum=plsql;javascript + SecurityValidationFunctionType string `json:"security.validationFunctionType,omitempty"` + + // The type of connection. + //+kubebuilder:validation:Enum=basic;tns;customurl + DBConnectionType string `json:"db.connectionType,omitempty"` + + // Specifies the JDBC URL connection to connect to the database. + DBCustomURL string `json:"db.customURL,omitempty"` + + // Specifies the host system for the Oracle database. + DBHostname string `json:"db.hostname,omitempty"` + + // Specifies the database listener port. + DBPort *int32 `json:"db.port,omitempty"` + + // Specifies the network service name of the database. + DBServicename string `json:"db.servicename,omitempty"` + + // Specifies the name of the database. + DBSid string `json:"db.sid,omitempty"` + + // Specifies the TNS alias name that matches the name in the tnsnames.ora file. + DBTnsAliasName string `json:"db.tnsAliasName,omitempty"` + + // Specifies the service name in the wallet archive for the pool. + DBWalletZipService string `json:"db.wallet.zip.service,omitempty"` + + // Specifies the JDBC driver type. + //+kubebuilder:validation:Enum=thin;oci8 + JDBCDriverType string `json:"jdbc.DriverType,omitempty"` + + // Specifies how long an available connection can remain idle before it is closed. The inactivity connection timeout is in seconds. + JDBCInactivityTimeout *int32 `json:"jdbc.InactivityTimeout,omitempty"` + + // Specifies the initial size for the number of connections that will be created. 
+ // The default is low, and should probably be set higher in most production environments. + JDBCInitialLimit *int32 `json:"jdbc.InitialLimit,omitempty"` + + // Specifies the maximum number of times to reuse a connection before it is discarded and replaced with a new connection. + JDBCMaxConnectionReuseCount *int32 `json:"jdbc.MaxConnectionReuseCount,omitempty"` + + // Sets the maximum connection reuse time property. + JDBCMaxConnectionReuseTime *int32 `json:"jdbc.MaxConnectionReuseTime,omitempty"` + + // Sets the time in seconds to trust an idle connection to skip a validation test. + JDBCSecondsToTrustIdleConnection *int32 `json:"jdbc.SecondsToTrustIdleConnection,omitempty"` + + // Specifies the maximum number of connections. + // Might be too low for some production environments. + JDBCMaxLimit *int32 `json:"jdbc.MaxLimit,omitempty"` + + // Specifies if the PL/SQL Gateway calls can be authenticated using database users. + // If the value is true then this feature is enabled. If the value is false, then this feature is disabled. + // Oracle recommends not to use this feature. + // This feature used only to facilitate customers migrating from mod_plsql. + JDBCAuthEnabled *bool `json:"jdbc.auth.enabled,omitempty"` + + // Specifies the maximum number of statements to cache for each connection. + JDBCMaxStatementsLimit *int32 `json:"jdbc.MaxStatementsLimit,omitempty"` + + // Specifies the minimum number of connections. + JDBCMinLimit *int32 `json:"jdbc.MinLimit,omitempty"` + + // Specifies a timeout period on a statement. + // An abnormally long running query or script, executed by a request, may leave it in a hanging state unless a timeout is + // set on the statement. Setting a timeout on the statement ensures that all the queries automatically timeout if + // they are not completed within the specified time period. + JDBCStatementTimeout *int32 `json:"jdbc.statementTimeout,omitempty"` + + // Specifies the default page to display. 
The Oracle REST Data Services Landing Page. + MiscDefaultPage string `json:"misc.defaultPage,omitempty"` + + // Specifies the maximum number of rows that will be returned from a query when processing a RESTful service + // and that will be returned from a nested cursor in a result set. + // Affects all RESTful services generated through a SQL query, regardless of whether the resource is paginated. + MiscPaginationMaxRows *int32 `json:"misc.pagination.maxRows,omitempty"` + + // Specifies the procedure name(s) to execute after executing the procedure specified on the URL. + // Multiple procedure names must be separated by commas. + ProcedurePostProcess string `json:"procedurePostProcess,omitempty"` + + // Specifies the procedure name(s) to execute prior to executing the procedure specified on the URL. + // Multiple procedure names must be separated by commas. + ProcedurePreProcess string `json:"procedure.preProcess,omitempty"` + + // Specifies the function to be invoked prior to dispatching each Oracle REST Data Services based REST Service. + // The function can perform configuration of the database session, perform additional validation or authorization of the request. + // If the function returns true, then processing of the request continues. + // If the function returns false, then processing of the request is aborted and an HTTP 403 Forbidden status is returned. + ProcedureRestPreHook string `json:"procedure.rest.preHook,omitempty"` + + // Specifies an authentication function to determine if the requested procedure in the URL should be allowed or disallowed for processing. + // The function should return true if the procedure is allowed; otherwise, it should return false. + // If it returns false, Oracle REST Data Services will return WWW-Authenticate in the response header. 
+ SecurityRequestAuthenticationFunction string `json:"security.requestAuthenticationFunction,omitempty"` + + // Specifies a validation function to determine if the requested procedure in the URL should be allowed or disallowed for processing. + // The function should return true if the procedure is allowed; otherwise, return false. + //+kubebuilder:default:="ords_util.authorize_plsql_gateway" + SecurityRequestValidationFunction string `json:"security.requestValidationFunction,omitempty"` + + // When using the SODA REST API, specifies the default number of documents returned for a GET request on a collection when a + // limit is not specified in the URL. Must be a positive integer, or "unlimited" for no limit. + SODADefaultLimit string `json:"soda.defaultLimit,omitempty"` + + // When using the SODA REST API, specifies the maximum number of documents that will be returned for a GET request on a collection URL, + // regardless of any limit specified in the URL. Must be a positive integer, or "unlimited" for no limit. + SODAMaxLimit string `json:"soda.maxLimit,omitempty"` + + // Specifies whether the REST-Enabled SQL service is active. + RestEnabledSqlActive *bool `json:"restEnabledSql.active,omitempty"` + + /************************************************* + * Customised + /************************************************/ + /* Below are settings with physical path/file locations to be replaced by ConfigMaps/Secrets, Boolean or HardCoded */ + + /* + // Specifies the wallet archive (provided in BASE64 encoding) containing connection details for the pool. + // Replaced with: DBWalletSecret *DBWalletSecret `json:"dbWalletSecret,omitempty"` + DBWalletZip string `json:"db.wallet.zip,omitempty"` + + // Specifies the path to a wallet archive containing connection details for the pool. + // HARDCODED + DBWalletZipPath string `json:"db.wallet.zip.path,omitempty"` + */ + + // Specifies the Secret containing the wallet archive containing connection details for the pool. 
+ // Replaces: db.wallet.zip + DBWalletSecret *DBWalletSecret `json:"dbWalletSecret,omitempty"` + + /* + // The directory location of your tnsnames.ora file. + // Replaced with: TNSAdminSecret *TNSAdminSecret `json:"tnsAdminSecret,omitempty"` + // DBTnsDirectory string `json:"db.tnsDirectory,omitempty"` + */ + + // Specifies the Secret containing the TNS_ADMIN directory + // Replaces: db.tnsDirectory + TNSAdminSecret *TNSAdminSecret `json:"tnsAdminSecret,omitempty"` + + /************************************************* + * Disabled + /************************************************* + // specifies a configuration setting for AutoUpgrade.jar location. + // AutoupgradeAPIAulocation string `json:"autoupgrade.api.aulocation,omitempty"` + // As of 23.4; AutoUpgrade.jar is not part of the container image + + // Specifies a configuration setting to enable AutoUpgrade REST API features. + // AutoupgradeAPIEnabled *bool `json:"autoupgrade.api.enabled,omitempty"` + // Guess this has to do with autoupgrade.api.aulocation which is not implemented + + // Specifies a configuration setting for AutoUpgrade REST API JVM location. + // AutoupgradeAPIJvmlocation string `json:"autoupgrade.api.jvmlocation,omitempty"` + // Guess this has to do with autoupgrade.api.aulocation which is not implemented + + // Specifies a configuration setting for AutoUpgrade REST API log location. + // AutoupgradeAPILoglocation string `json:"autoupgrade.api.loglocation,omitempty"` + // Guess this has to do with autoupgrade.api.aulocation which is not implemented + + // Specifies that the pool points to a CDB, and that the PDBs connected to that CDB should be made addressable + // by Oracle REST Data Services + // DBServiceNameSuffix string `json:"db.serviceNameSuffix,omitempty"` + // Not sure of use case here?!? 
+ */ +} + +type PriVKey struct { + Secret PasswordSecret `json:"secret"` +} + +// Defines the secret containing Password mapped to secretKey +type PasswordSecret struct { + // Specifies the name of the password Secret + SecretName string `json:"secretName"` + // Specifies the key holding the value of the Secret + //+kubebuilder:default:="password" + PasswordKey string `json:"passwordKey,omitempty"` +} + +// Defines the secret containing Certificates +type CertificateSecret struct { + // Specifies the name of the certificate Secret + SecretName string `json:"secretName"` + // Specifies the Certificate + Certificate string `json:"cert"` + // Specifies the Certificate Key + CertificateKey string `json:"key"` +} + +// Defines the secret containing Certificates +type TNSAdminSecret struct { + // Specifies the name of the TNS_ADMIN Secret + SecretName string `json:"secretName"` +} + +// Defines the secret containing Certificates +type DBWalletSecret struct { + // Specifies the name of the Database Wallet Secret + SecretName string `json:"secretName"` + // Specifies the Secret key name containing the Wallet + WalletName string `json:"walletName"` +} + +// OrdsSrvsStatus defines the observed state of OrdsSrvs +type OrdsSrvsStatus struct { + //** PLACE HOLDER + OrdsInstalled bool `json:"ordsInstalled,omitempty"` + //** PLACE HOLDER + // Indicates the current status of the resource + Status string `json:"status,omitempty"` + // Indicates the current Workload type of the resource + WorkloadType string `json:"workloadType,omitempty"` + // Indicates the ORDS version + ORDSVersion string `json:"ordsVersion,omitempty"` + // Indicates the HTTP port of the resource exposed by the pods + HTTPPort *int32 `json:"httpPort,omitempty"` + // Indicates the HTTPS port of the resource exposed by the pods + HTTPSPort *int32 `json:"httpsPort,omitempty"` + // Indicates the MongoAPI port of the resource exposed by the pods (if enabled) + MongoPort int32 `json:"mongoPort,omitempty"` + // 
Indicates if the resource is out-of-sync with the configuration + RestartRequired bool `json:"restartRequired"` + + // +operator-sdk:csv:customresourcedefinitions:type=status + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:JSONPath=".status.status",name="status",type="string" +//+kubebuilder:printcolumn:JSONPath=".status.workloadType",name="workloadType",type="string" +//+kubebuilder:printcolumn:JSONPath=".status.ordsVersion",name="ordsVersion",type="string" +//+kubebuilder:printcolumn:JSONPath=".status.httpPort",name="httpPort",type="integer" +//+kubebuilder:printcolumn:JSONPath=".status.httpsPort",name="httpsPort",type="integer" +//+kubebuilder:printcolumn:JSONPath=".status.mongoPort",name="MongoPort",type="integer" +//+kubebuilder:printcolumn:JSONPath=".status.restartRequired",name="restartRequired",type="boolean" +//+kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="AGE",type="date" +//+kubebuilder:printcolumn:JSONPath=".status.ordsInstalled",name="OrdsInstalled",type="boolean" +//+kubebuilder:resource:path=ordssrvs,scope=Namespaced + +// OrdsSrvs is the Schema for the ordssrvs API +type OrdsSrvs struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OrdsSrvsSpec `json:"spec,omitempty"` + Status OrdsSrvsStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// OrdsSrvsList contains a list of OrdsSrvs +type OrdsSrvsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OrdsSrvs `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OrdsSrvs{}, &OrdsSrvsList{}) +} diff --git a/apis/database/v4/pdb_types.go b/apis/database/v4/pdb_types.go new file mode 100644 index 00000000..16021f12 --- /dev/null +++ b/apis/database/v4/pdb_types.go 
@@ -0,0 +1,237 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PDBSpec defines the desired state of PDB +type PDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + PDBTlsKey PDBTLSKEY `json:"pdbTlsKey,omitempty"` + PDBTlsCrt PDBTLSCRT `json:"pdbTlsCrt,omitempty"` + PDBTlsCat PDBTLSCAT `json:"pdbTlsCat,omitempty"` + + // CDB Namespace + CDBNamespace string `json:"cdbNamespace,omitempty"` + // Name of the CDB Custom Resource that runs the ORDS container + CDBResName string `json:"cdbResName,omitempty"` + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // The name of the new PDB. Relevant for both Create and Plug Actions. + PDBName string `json:"pdbName,omitempty"` + // Name of the Source PDB from which to clone + SrcPDBName string `json:"srcPdbName,omitempty"` + // The administrator username for the new PDB. This property is required when the Action property is Create. + AdminName PDBAdminName `json:"adminName,omitempty"` + // The administrator password for the new PDB. This property is required when the Action property is Create. + AdminPwd PDBAdminPassword `json:"adminPwd,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebServerUsr WebServerUserPDB `json:"webServerUser,omitempty"` + // Password for the Web ServerPDB User + WebServerPwd WebServerPasswordPDB `json:"webServerPwd,omitempty"` + // Relevant for Create and Plug operations. As defined in the Oracle Multitenant Database documentation. Values can be a filename convert pattern or NONE. + FileNameConversions string `json:"fileNameConversions,omitempty"` + // This property is required when the Action property is Plug. As defined in the Oracle Multitenant Database documentation. Values can be a source filename convert pattern or NONE. 
+ SourceFileNameConversions string `json:"sourceFileNameConversions,omitempty"` + // XML metadata filename to be used for Plug or Unplug operations + XMLFileName string `json:"xmlFileName,omitempty"` + // To copy files or not while cloning a PDB + // +kubebuilder:validation:Enum=COPY;NOCOPY;MOVE + CopyAction string `json:"copyAction,omitempty"` + // Specify if datafiles should be removed or not. The value can be INCLUDING or KEEP (default). + // +kubebuilder:validation:Enum=INCLUDING;KEEP + DropAction string `json:"dropAction,omitempty"` + // A Path specified for sparse clone snapshot copy. (Optional) + SparseClonePath string `json:"sparseClonePath,omitempty"` + // Whether to reuse temp file + ReuseTempFile *bool `json:"reuseTempFile,omitempty"` + // Relevant for Create and Plug operations. True for unlimited storage. Even when set to true, totalSize and tempSize MUST be specified in the request if Action is Create. + UnlimitedStorage *bool `json:"unlimitedStorage,omitempty"` + // Indicate if 'AS CLONE' option should be used in the command to plug in a PDB. This property is applicable when the Action property is PLUG but not required. + AsClone *bool `json:"asClone,omitempty"` + // Relevant for create and plug operations. Total size as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TotalSize string `json:"totalSize,omitempty"` + // Relevant for Create and Clone operations. Total size for temporary tablespace as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TempSize string `json:"tempSize,omitempty"` + // TDE import for plug operations + TDEImport *bool `json:"tdeImport,omitempty"` + // TDE export for unplug operations + TDEExport *bool `json:"tdeExport,omitempty"` + // TDE password if the tdeImport or tdeExport flag is set to true. 
Can be used in create, plug or unplug operations
+	TDEPassword TDEPwd `json:"tdePassword,omitempty"`
+	// TDE keystore path is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations.
+	TDEKeystorePath string `json:"tdeKeystorePath,omitempty"`
+	// TDE secret is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations.
+	TDESecret TDESecret `json:"tdeSecret,omitempty"`
+	// Whether you need the script only or execute the script
+	GetScript *bool `json:"getScript,omitempty"`
+	// Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map. Map is used to map a Database PDB to a Kubernetes PDB CR.
+	// +kubebuilder:validation:Enum=Create;Clone;Plug;Unplug;Delete;Modify;Status;Map
+	Action string `json:"action"`
+	// Extra options for opening and closing a PDB
+	// +kubebuilder:validation:Enum=IMMEDIATE;NORMAL;READ ONLY;READ WRITE;RESTRICTED
+	ModifyOption string `json:"modifyOption,omitempty"`
+	// The target state of the PDB
+	// +kubebuilder:validation:Enum=OPEN;CLOSE
+	PDBState string `json:"pdbState,omitempty"`
+	// turn on the assertive approach to delete pdb resource
+	// kubectl delete pdb ..... 
automatically triggers the pluggable database + // deletion + AssertivePdbDeletion bool `json:"assertivePdbDeletion,omitempty"` + PDBPubKey PDBPUBKEY `json:"pdbOrdsPubKey,omitempty"` + PDBPriKey PDBPRIVKEY `json:"pdbOrdsPrvKey,omitempty"` +} + +// PDBAdminName defines the secret containing Sys Admin User mapped to key 'adminName' for PDB +type PDBAdminName struct { + Secret PDBSecret `json:"secret"` +} + +// PDBAdminPassword defines the secret containing Sys Admin Password mapped to key 'adminPwd' for PDB +type PDBAdminPassword struct { + Secret PDBSecret `json:"secret"` +} + +// TDEPwd defines the secret containing TDE Wallet Password mapped to key 'tdePassword' for PDB +type TDEPwd struct { + Secret PDBSecret `json:"secret"` +} + +// TDESecret defines the secret containing TDE Secret to key 'tdeSecret' for PDB +type TDESecret struct { + Secret PDBSecret `json:"secret"` +} + +// WebServerUser defines the secret containing Web Server User mapped to key 'webServerUser' to manage PDB lifecycle + +type WebServerUserPDB struct { + Secret PDBSecret `json:"secret"` +} + +// WebServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle +type WebServerPasswordPDB struct { + Secret PDBSecret `json:"secret"` +} + +// PDBSecret defines the secretName +type PDBSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +type PDBTLSKEY struct { + Secret PDBSecret `json:"secret"` +} + +type PDBTLSCRT struct { + Secret PDBSecret `json:"secret"` +} + +type PDBTLSCAT struct { + Secret PDBSecret `json:"secret"` +} + +// PDBStatus defines the observed state of PDB +type PDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // PDB Connect String + ConnString string `json:"connString,omitempty"` + // Phase of the PDB Resource + Phase string `json:"phase"` + // PDB Resource Status + 
Status bool `json:"status"` + // Total size of the PDB + TotalSize string `json:"totalSize,omitempty"` + // Open mode of the PDB + OpenMode string `json:"openMode,omitempty"` + // Modify Option of the PDB + ModifyOption string `json:"modifyOption,omitempty"` + // Message + Msg string `json:"msg,omitempty"` + // Last Completed Action + Action string `json:"action,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" +// +kubebuilder:printcolumn:JSONPath=".spec.pdbName",name="PDB Name",type="string",description="Name of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.openMode",name="PDB State",type="string",description="PDB Open Mode" +// +kubebuilder:printcolumn:JSONPath=".status.totalSize",name="PDB Size",type="string",description="Total Size of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the PDB Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".status.connString",name="Connect_String",type="string",description="The connect string to be used" +// +kubebuilder:resource:path=pdbs,scope=Namespaced +// +kubebuilder:storageversion + +// PDB is the Schema for the pdbs API +type PDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PDBSpec `json:"spec,omitempty"` + Status PDBStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// PDBList contains a list of PDB +type PDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PDB `json:"items"` +} + +type PDBPUBKEY struct { + Secret PDBSecret `json:"secret"` +} + +type PDBPRIVKEY struct { + Secret PDBSecret `json:"secret"` +} + +func init() { + 
SchemeBuilder.Register(&PDB{}, &PDBList{}) +} diff --git a/apis/database/v4/pdb_webhook.go b/apis/database/v4/pdb_webhook.go new file mode 100644 index 00000000..f651accf --- /dev/null +++ b/apis/database/v4/pdb_webhook.go @@ -0,0 +1,369 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +/* MODIFIED (MM/DD/YY) +** rcitton 07/14/22 - 33822886 + */ + +package v4 + +import ( + "reflect" + "strconv" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var pdblog = logf.Log.WithName("pdb-webhook") + +func (r *PDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-pdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=mpdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &PDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *PDB) Default() { + pdblog.Info("Setting default values in PDB spec for : " + r.Name) + + action := strings.ToUpper(r.Spec.Action) + + if action == "DELETE" { + if r.Spec.DropAction == "" { + r.Spec.DropAction = "INCLUDING" + pdblog.Info(" - dropAction : INCLUDING") + } + } else if action != "MODIFY" && action != "STATUS" { + if r.Spec.ReuseTempFile == nil { + r.Spec.ReuseTempFile = new(bool) + *r.Spec.ReuseTempFile = true + pdblog.Info(" - reuseTempFile : " + strconv.FormatBool(*(r.Spec.ReuseTempFile))) + } + if r.Spec.UnlimitedStorage == nil { + r.Spec.UnlimitedStorage = new(bool) + *r.Spec.UnlimitedStorage = 
true
+		pdblog.Info(" - unlimitedStorage : " + strconv.FormatBool(*(r.Spec.UnlimitedStorage)))
+	}
+	if r.Spec.TDEImport == nil {
+		r.Spec.TDEImport = new(bool)
+		*r.Spec.TDEImport = false
+		pdblog.Info(" - tdeImport : " + strconv.FormatBool(*(r.Spec.TDEImport)))
+	}
+	if r.Spec.TDEExport == nil {
+		r.Spec.TDEExport = new(bool)
+		*r.Spec.TDEExport = false
+		pdblog.Info(" - tdeExport : " + strconv.FormatBool(*(r.Spec.TDEExport)))
+	}
+	if r.Spec.AsClone == nil {
+		r.Spec.AsClone = new(bool)
+		*r.Spec.AsClone = false
+		pdblog.Info(" - asClone : " + strconv.FormatBool(*(r.Spec.AsClone)))
+	}
+
+	}
+
+	if r.Spec.GetScript == nil {
+		r.Spec.GetScript = new(bool)
+		*r.Spec.GetScript = false
+		pdblog.Info(" - getScript : " + strconv.FormatBool(*(r.Spec.GetScript)))
+	}
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-pdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=vpdb.kb.io,admissionReviewVersions={v1,v1beta1}
+
+var _ webhook.Validator = &PDB{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (r *PDB) ValidateCreate() (admission.Warnings, error) {
+	pdblog.Info("ValidateCreate-Validating PDB spec for : " + r.Name)
+
+	var allErrs field.ErrorList
+
+	r.validateCommon(&allErrs)
+
+	r.validateAction(&allErrs)
+
+	action := strings.ToUpper(r.Spec.Action)
+
+	if len(allErrs) == 0 {
+		pdblog.Info("PDB Resource : " + r.Name + " successfully validated for Action : " + action)
+		return nil, nil
+	}
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "database.oracle.com", Kind: "PDB"},
+		r.Name, allErrs)
+}
+
+// Validate Action for required parameters
+func (r *PDB) validateAction(allErrs *field.ErrorList) {
+	action := strings.ToUpper(r.Spec.Action)
+
+	pdblog.Info("Validating PDB Resource Action : " + action)
+
+	if reflect.ValueOf(r.Spec.PDBTlsKey).IsZero() {
+		*allErrs = append(*allErrs,
+			field.Required(field.NewPath("spec").Child("pdbTlsKey"), "Please specify PDB Tls Key(secret)"))
+	}
+
+	if reflect.ValueOf(r.Spec.PDBTlsCrt).IsZero() {
+		*allErrs = append(*allErrs,
+			field.Required(field.NewPath("spec").Child("pdbTlsCrt"), "Please specify PDB Tls Certificate(secret)"))
+	}
+
+	if reflect.ValueOf(r.Spec.PDBTlsCat).IsZero() {
+		*allErrs = append(*allErrs,
+			field.Required(field.NewPath("spec").Child("pdbTlsCat"), "Please specify PDB Tls Certificate Authority(secret)"))
+	}
+	if reflect.ValueOf(r.Spec.PDBPriKey).IsZero() {
+		*allErrs = append(*allErrs,
+			field.Required(field.NewPath("spec").Child("pdbOrdsPrvKey"), "Please specify PDB ORDS Private Key(secret)"))
+	}
+
+	switch action {
+	case "DELETE":
+		/* BUG 36752336 - LREST OPERATOR - DELETE NON-EXISTENT PDB SHOWS LRPDB CREATED MESSAGE */
+		if r.Status.OpenMode == "READ WRITE" {
+			pdblog.Info("Cannot delete: pdb is open ")
+			*allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode))
+		}
+		r.CheckObjExistence("DELETE", allErrs, r)
+	case "CREATE":
+		if reflect.ValueOf(r.Spec.AdminName).IsZero() {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("adminName"), "Please specify PDB System Administrator user"))
+		}
+		if reflect.ValueOf(r.Spec.AdminPwd).IsZero() {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("adminPwd"), "Please specify PDB System Administrator Password"))
+		}
+		if reflect.ValueOf(r.Spec.WebServerUsr).IsZero() {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("webServerUser"), "Please specify the http webServerUser"))
+		}
+		if reflect.ValueOf(r.Spec.WebServerPwd).IsZero() {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify the http webserverPassword"))
+		}
+
+		if 
r.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. Values can be a filename convert pattern or NONE")) + } + if r.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if r.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + if *(r.Spec.TDEImport) { + r.validateTDEInfo(allErrs) + } + case "CLONE": + // Sample Err: The PDB "pdb1-clone" is invalid: spec.srcPdbName: Required value: Please specify source PDB for Cloning + if r.Spec.SrcPDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("srcPdbName"), "Please specify source PDB name for Cloning")) + } + if r.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if r.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + /* We don't need this check as ords open the pdb before cloninig */ + /* + if r.Status.OpenMode == "MOUNTED" { + pdblog.Info("Cannot clone: pdb is mount ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + */ + case "PLUG": + if r.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if r.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + 
field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. Values can be a filename convert pattern or NONE"))
+		}
+		if r.Spec.SourceFileNameConversions == "" {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("sourceFileNameConversions"), "Please specify a value for sourceFileNameConversions. Values can be a filename convert pattern or NONE"))
+		}
+		if r.Spec.CopyAction == "" {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("copyAction"), "Please specify a value for copyAction. Values can be COPY, NOCOPY or MOVE"))
+		}
+		if *(r.Spec.TDEImport) {
+			r.validateTDEInfo(allErrs)
+		}
+	case "UNPLUG":
+		if r.Spec.XMLFileName == "" {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename"))
+		}
+		if *(r.Spec.TDEExport) {
+			r.validateTDEInfo(allErrs)
+		}
+		if r.Status.OpenMode == "READ WRITE" {
+			pdblog.Info("Cannot unplug: pdb is open ")
+			*allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode))
+		}
+		r.CheckObjExistence("UNPLUG", allErrs, r)
+	case "MODIFY":
+		if r.Spec.PDBState == "" {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("pdbState"), "Please specify target state of PDB"))
+		}
+		if r.Spec.ModifyOption == "" {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("modifyOption"), "Please specify an option for opening/closing a PDB"))
+		}
+		r.CheckObjExistence("MODIFY", allErrs, r)
+	}
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *PDB) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+	pdblog.Info("ValidateUpdate-Validating PDB spec for : " + r.Name)
+
+	isPDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil
+	if isPDBMarkedToBeDeleted {
+		return nil, nil
+	}
+
+	var 
allErrs field.ErrorList + action := strings.ToUpper(r.Spec.Action) + + // If PDB CR has been created and in Ready state, only allow updates if the "action" value has changed as well + if (r.Status.Phase == "Ready") && (r.Status.Action != "MODIFY") && (r.Status.Action != "STATUS") && (r.Status.Action == action) { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("action"), "New action also needs to be specified after PDB is in Ready state")) + } else { + + // Check Common Validations + r.validateCommon(&allErrs) + + // Validate required parameters for Action specified + r.validateAction(&allErrs) + + // Check TDE requirements + if (action != "DELETE") && (action != "MODIFY") && (action != "STATUS") && (*(r.Spec.TDEImport) || *(r.Spec.TDEExport)) { + r.validateTDEInfo(&allErrs) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "PDB"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *PDB) ValidateDelete() (admission.Warnings, error) { + pdblog.Info("ValidateDelete-Validating PDB spec for : " + r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} + +// Validate common specs needed for all PDB Actions +func (r *PDB) validateCommon(allErrs *field.ErrorList) { + pdblog.Info("validateCommon", "name", r.Name) + + if r.Spec.Action == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("action"), "Please specify PDB operation to be performed")) + } + if r.Spec.CDBResName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("cdbResName"), "Please specify the name of the CDB Kubernetes resource to use for PDB operations")) + } + if r.Spec.PDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbName"), "Please specify name of the PDB to be created")) + } +} + +// Validate TDE information for Create, Plug and Unplug Actions +func (r *PDB) validateTDEInfo(allErrs *field.ErrorList) { + pdblog.Info("validateTDEInfo", "name", r.Name) + + if reflect.ValueOf(r.Spec.TDEPassword).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdePassword"), "Please specify a value for tdePassword.")) + } + if r.Spec.TDEKeystorePath == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeKeystorePath"), "Please specify a value for tdeKeystorePath.")) + } + if reflect.ValueOf(r.Spec.TDESecret).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeSecret"), "Please specify a value for tdeSecret.")) + } + +} + +func (r *PDB) CheckObjExistence(action string, allErrs *field.ErrorList, pdb *PDB) { + /* BUG 36752465 - lrest operator - open non-existent pdb creates a lrpdb with status failed */ + pdblog.Info("Action [" + action + "] checking " + pdb.Spec.PDBName + " existence") + if pdb.Status.OpenMode == "" { + *allErrs = append(*allErrs, field.NotFound(field.NewPath("Spec").Child("PDBName"), " "+pdb.Spec.PDBName+" does not exist : action "+action+" failure")) + + } +} diff --git a/apis/database/v4/shardingdatabase_conversion.go 
b/apis/database/v4/shardingdatabase_conversion.go new file mode 100644 index 00000000..7b2c17ac --- /dev/null +++ b/apis/database/v4/shardingdatabase_conversion.go @@ -0,0 +1,4 @@ +package v4 + +// Hub defines v1 as the hub version +func (*ShardingDatabase) Hub() {} diff --git a/apis/database/v4/shardingdatabase_types.go b/apis/database/v4/shardingdatabase_types.go new file mode 100644 index 00000000..cc01b24d --- /dev/null +++ b/apis/database/v4/shardingdatabase_types.go @@ -0,0 +1,427 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "sync" + + "encoding/json" + + "sigs.k8s.io/controller-runtime/pkg/client" + + annsv1 "github.com/oracle/oracle-database-operator/commons/annotations" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// ShardingDatabaseSpec defines the desired state of ShardingDatabase +type ShardingDatabaseSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Shard []ShardSpec `json:"shard"` + Catalog []CatalogSpec `json:"catalog"` // The catalogSpes accept all the catalog parameters + Gsm []GsmSpec `json:"gsm"` // The GsmSpec will accept all the Gsm parameter + StorageClass string `json:"storageClass,omitempty"` // Optional Accept storage class name + DbImage string `json:"dbImage"` // Accept DB Image name + DbImagePullSecret string `json:"dbImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. + GsmImage string `json:"gsmImage"` // Acccept the GSM image name + GsmImagePullSecret string `json:"gsmImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. 
+ StagePvcName string `json:"stagePvcName,omitempty"` // the Stagepvc for the backup of cluster + PortMappings []PortMapping `json:"portMappings,omitempty"` // Port mappings for the service that is created. The service is created if there is at least + IsDebug bool `json:"isDebug,omitempty"` // Optional parameter to enable logining + IsExternalSvc bool `json:"isExternalSvc,omitempty"` + IsClone bool `json:"isClone,omitempty"` + IsDataGuard bool `json:"isDataGuard,omitempty"` + ScriptsLocation string `json:"scriptsLocation,omitempty"` + IsDeleteOraPvc bool `json:"isDeleteOraPvc,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + LivenessCheckPeriod int `json:"liveinessCheckPeriod,omitempty"` + ReplicationType string `json:"replicationType,omitempty"` + IsDownloadScripts bool `json:"isDownloadScripts,omitempty"` + InvitedNodeSubnetFlag string `json:"invitedNodeSubnetFlag,omitempty"` + InvitedNodeSubnet string `json:"InvitedNodeSubnet,omitempty"` + ShardingType string `json:"shardingType,omitempty"` + GsmShardSpace []GsmShardSpaceSpec `json:"gsmShardSpace,omitempty"` + GsmShardGroup []GsmShardGroupSpec `json:"gsmShardGroup,omitempty"` + ShardRegion []string `json:"shardRegion,omitempty"` + ShardBuddyRegion string `json:"shardBuddyRegion,omitempty"` + GsmService []GsmServiceSpec `json:"gsmService,omitempty"` + ShardConfigName string `json:"shardConfigName,omitempty"` + GsmDevMode string `json:"gsmDevMode,omitempty"` + DbSecret *SecretDetails `json:"dbSecret,omitempty"` // Secret Name to be used with Shard + IsTdeWallet string `json:"isTdeWallet,omitempty"` + TdeWalletPvc string `json:"tdeWalletPvc,omitempty"` + FssStorageClass string `json:"fssStorageClass,omitempty"` + TdeWalletPvcMountLocation string `json:"tdeWalletPvcMountLocation,omitempty"` + DbEdition string `json:"dbEdition,omitempty"` + TopicId string `json:"topicId,omitempty"` +} + +// To understand Metav1.Condition, please refer the link 
https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 +// ShardingDatabaseStatus defines the observed state of ShardingDatabase +type ShardingDatabaseStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + Shard map[string]string `json:"shards,omitempty"` + Catalog map[string]string `json:"catalogs,omitempty"` + + Gsm GsmStatus `json:"gsm,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + CrdStatus []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +type GsmStatus struct { + InternalconnectStr string `json:"internalConnectStr,omitempty"` + ExternalConnectStr string `json:"externalConnectStr,omitempty"` + State string `json:"state,omitempty"` + Shards map[string]string `json:"shards,omitempty"` + Details map[string]string `json:"details,omitempty"` + Services string `json:"services,omitempty"` +} + +type GsmShardDetails struct { + Name string `json:"name,omitempty"` + Available string `json:"available,omitempty"` + State string `json:"State,omitempty"` +} + +type GsmStatusDetails struct { + Name string `json:"name,omitempty"` + K8sInternalSvc string `json:"k8sInternalSvc,omitempty"` + K8sExternalSvc string `json:"k8sExternalSvc,omitempty"` + K8sInternalSvcIP string `json:"k8sInternalIP,omitempty"` + K8sExternalSvcIP string `json:"k8sExternalIP,omitempty"` + Role string `json:"role,omitempty"` + DbPasswordSecret string `json:"dbPasswordSecret"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:JSONPath=".status.gsm.state",name="Gsm State",type=string +//+kubebuilder:printcolumn:JSONPath=".status.gsm.services",name="Services",type=string +//+kubebuilder:printcolumn:JSONPath=".status.gsm.shards",name="shards",type=string,priority=1 + +// ShardingDatabase is the Schema for the shardingdatabases API +// 
+kubebuilder:resource:path=shardingdatabases,scope=Namespaced +// +kubebuilder:storageversion +type ShardingDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ShardingDatabaseSpec `json:"spec,omitempty"` + Status ShardingDatabaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// ShardingDatabaseList contains a list of ShardingDatabase +type ShardingDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ShardingDatabase `json:"items"` +} + +// ShardSpec is a specification of Shards for an application deployment. +// +k8s:openapi-gen=true +type ShardSpec struct { + Name string `json:"name"` // Shard name that will be used deploy StatefulSet + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Shard Storage Size + EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Shards + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` //Optional resource requirement for the container. 
+ PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` + // +kubebuilder:validation:Enum=enable;disable;failed;force + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + ShardSpace string `json:"shardSpace,omitempty"` + ShardGroup string `json:"shardGroup,omitempty"` + ShardRegion string `json:"shardRegion,omitempty"` + DeployAs string `json:"deployAs,omitempty"` +} + +// CatalogSpec defines the desired state of CatalogSpec +// +k8s:openapi-gen=true +type CatalogSpec struct { + Name string `json:"name"` // Catalog name that will be used deploy StatefulSet + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Catalog Storage Size and This parameter will not be used if you use PvcName + EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Catalog + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. + PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` +} + +// GsmSpec defines the desired state of GsmSpec +// +k8s:openapi-gen=true +type GsmSpec struct { + Name string `json:"name"` // Gsm name that will be used deploy StatefulSet + + //Replicas int32 `json:"replicas,omitempty"` // Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. 
+ EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for GSM + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // This parameter will not be used if you use OraGsmPvcName + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. + PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` // Optional GSM Label + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Region string `json:"region,omitempty"` + DirectorName string `json:"directorName,omitempty"` +} + +// ShardGroupSpec Specification + +type GsmShardGroupSpec struct { + Name string `json:"name"` // Name of the shardgroup. + Region string `json:"region,omitempty"` + DeployAs string `json:"deployAs,omitempty"` +} + +// ShardSpace Specs +type GsmShardSpaceSpec struct { + Name string `json:"name"` // Name of the shardSpace. + Chunks int `json:"chunks,omitempty"` //chunks is optional + ProtectionMode string `json:"protectionMode,omitempty"` // Data guard protection mode + ShardGroup string `json:"shardGroup,omitempty"` +} + +// Service Definition +type GsmServiceSpec struct { + Name string `json:"name"` // Name of the shardSpace. 
+ Available string `json:"available,omitempty"` + ClbGoal string `json:"clbGoal,omitempty"` + CommitOutcome string `json:"commitOutcome,omitempty"` + DrainTimeout string `json:"drainTimeout,omitempty"` + Dtp string `json:"dtp,omitempty"` + Edition string `json:"edition,omitempty"` + FailoverPrimary string `json:"failoverPrimary,omitempty"` + FailoverRestore string `json:"failoverRestore,omitempty"` + FailoverDelay string `json:"failoverDelay,omitempty"` + FailoverMethod string `json:"failoverMethod,omitempty"` + FailoverRetry string `json:"failoverRetry,omitempty"` + FailoverType string `json:"failoverType,omitempty"` + GdsPool string `json:"gdsPool,omitempty"` + Role string `json:"role,omitempty"` + SessionState string `json:"sessionState,omitempty"` + Lag int `json:"lag,omitempty"` + Locality string `json:"locality,omitempty"` + Notification string `json:"notification,omitempty"` + PdbName string `json:"pdbName,omitempty"` + Policy string `json:"policy,omitempty"` + Preferrred string `json:"preferred,omitempty"` + PreferredAll string `json:"prferredAll,omitempty"` + RegionFailover string `json:"regionFailover,omitempty"` + StopOption string `json:"stopOption,omitempty"` + SqlTrasactionProfile string `json:"sqlTransactionProfile,omitempty"` + TableFamily string `json:"tableFamily,omitempty"` + Retention string `json:"retention,omitempty"` + TfaPolicy string `json:"tfaPolicy,omitempty"` +} + +// Secret Details +type SecretDetails struct { + Name string `json:"name"` // Name of the secret. + KeyFileName string `json:"keyFileName,omitempty"` // Name of the key. 
+ NsConfigMap string `json:"nsConfigMap,omitempty"` + NsSecret string `json:"nsSecret,omitempty"` + PwdFileName string `json:"pwdFileName"` + PwdFileMountLocation string `json:"pwdFileMountLocation,omitempty"` + KeyFileMountLocation string `json:"keyFileMountLocation,omitempty"` + KeySecretName string `json:"keySecretName,omitempty"` + EncryptionType string `json:"encryptionType,omitempty"` +} + +// EnvironmentVariable represents a named variable accessible for containers. +// +k8s:openapi-gen=true +type EnvironmentVariable struct { + Name string `json:"name"` // Name of the variable. Must be a C_IDENTIFIER. + Value string `json:"value"` // Value of the variable, as defined in Kubernetes core API. +} + +// PortMapping is a specification of port mapping for an application deployment. +// +k8s:openapi-gen=true +type PortMapping struct { + Port int32 `json:"port"` // Port that will be exposed on the service. + TargetPort int32 `json:"targetPort"` // Docker image port for the application. + Protocol corev1.Protocol `json:"protocol"` // IP protocol for the mapping, e.g., "TCP" or "UDP". 
+} + +type SfsetLabel string + +const ( + ShardingDelLabelKey SfsetLabel = "sharding.oracle.com/delflag" + ShardingDelLabelTrueValue SfsetLabel = "true" + ShardingDelLabelFalseValue SfsetLabel = "false" +) + +type ShardStatusMapKeys string + +const ( + Name ShardStatusMapKeys = "Name" + K8sInternalSvc ShardStatusMapKeys = "K8sInternalSvc" + K8sExternalSvc ShardStatusMapKeys = "K8sExternalSvc" + K8sInternalSvcIP ShardStatusMapKeys = "K8sInternalSvcIP" + K8sExternalSvcIP ShardStatusMapKeys = "K8sExternalSvcIP" + OracleSid ShardStatusMapKeys = "OracleSid" + OraclePdb ShardStatusMapKeys = "OraclePdb" + Role ShardStatusMapKeys = "Role" + DbPasswordSecret ShardStatusMapKeys = "DbPasswordSecret" + State ShardStatusMapKeys = "State" + OpenMode ShardStatusMapKeys = "OpenMode" +) + +type ShardLifecycleState string + +const ( + AvailableState ShardLifecycleState = "AVAILABLE" + FailedState ShardLifecycleState = "FAILED" + UpdateState ShardLifecycleState = "UPDATING" + ProvisionState ShardLifecycleState = "PROVISIONING" + PodNotReadyState ShardLifecycleState = "PODNOTREADY" + PodFailureState ShardLifecycleState = "PODFAILURE" + PodNotFound ShardLifecycleState = "PODNOTFOUND" + StatefulSetFailure ShardLifecycleState = "STATEFULSETFAILURE" + StatefulSetNotFound ShardLifecycleState = "STATEFULSETNOTFOUND" + DeletingState ShardLifecycleState = "DELETING" + DeleteErrorState ShardLifecycleState = "DELETE_ERROR" + ChunkMoveError ShardLifecycleState = "CHUNK_MOVE_ERROR_IN_GSM" + Terminated ShardLifecycleState = "TERMINATED" + LabelPatchingError ShardLifecycleState = "LABELPATCHINGERROR" + DeletePVCError ShardLifecycleState = "DELETEPVCERROR" + AddingShardState ShardLifecycleState = "SHARD_ADDITION" + AddingShardErrorState ShardLifecycleState = "SHARD_ADDITION_ERROR_IN_GSM" + ShardOnlineErrorState ShardLifecycleState = "SHARD_ONLINE_ERROR_IN_GSM" + ShardOnlineState ShardLifecycleState = "ONLINE_SHARD" + ShardRemoveError ShardLifecycleState = "SHARD_DELETE_ERROR_FROM_GSM" +) + +type 
CrdReconcileState string + +const ( + CrdReconcileErrorState CrdReconcileState = "ReconcileError" + CrdReconcileErrorReason CrdReconcileState = "LastReconcileCycleFailed" + CrdReconcileQueuedState CrdReconcileState = "ReconcileQueued" + CrdReconcileQueuedReason CrdReconcileState = "LastReconcileCycleQueued" + CrdReconcileCompeleteState CrdReconcileState = "ReconcileComplete" + CrdReconcileCompleteReason CrdReconcileState = "LastReconcileCycleCompleted" + CrdReconcileWaitingState CrdReconcileState = "ReconcileWaiting" + CrdReconcileWaitingReason CrdReconcileState = "LastReconcileCycleWaiting" +) + +// var +var KubeConfigOnce sync.Once + +// #const lastSuccessfulSpec = "lastSuccessfulSpec" +const lastSuccessfulSpecOnsInfo = "lastSuccessfulSpeOnsInfo" + +// GetLastSuccessfulSpec returns spec from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulSpec. +func (shardingv1 *ShardingDatabase) GetLastSuccessfulSpec() (*ShardingDatabaseSpec, error) { + val, ok := shardingv1.GetAnnotations()[lastSuccessfulSpec] + if !ok { + return nil, nil + } + + specBytes := []byte(val) + sucSpec := ShardingDatabaseSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + return nil, err + } + + return &sucSpec, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. +func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpec(kubeClient client.Client) error { + specBytes, err := json.Marshal(shardingv1.Spec) + if err != nil { + return err + } + + anns := map[string]string{ + lastSuccessfulSpec: string(specBytes), + } + + return annsv1.PatchAnnotations(kubeClient, shardingv1, anns) +} + +// GetLastSuccessfulOnsInfo returns spec from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulSpec. 
+func (shardingv1 *ShardingDatabase) GetLastSuccessfulOnsInfo() ([]byte, error) { + val, ok := shardingv1.GetAnnotations()[lastSuccessfulSpecOnsInfo] + if !ok { + return nil, nil + } + specBytes := []byte(val) + return specBytes, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. +func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpecOnsInfo(kubeClient client.Client, specBytes []byte) error { + + anns := map[string]string{ + lastSuccessfulSpecOnsInfo: string(specBytes), + } + + return annsv1.PatchAnnotations(kubeClient, shardingv1, anns) +} + +func init() { + SchemeBuilder.Register(&ShardingDatabase{}, &ShardingDatabaseList{}) +} diff --git a/apis/database/v4/shardingdatabase_webhook.go b/apis/database/v4/shardingdatabase_webhook.go new file mode 100644 index 00000000..1ac74d08 --- /dev/null +++ b/apis/database/v4/shardingdatabase_webhook.go @@ -0,0 +1,314 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, 
export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var shardingdatabaselog = logf.Log.WithName("shardingdatabase-resource") + +func (r *ShardingDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-shardingdatabase,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=shardingdatabases,verbs=create;update,versions=v4,name=mshardingdatabasev4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &ShardingDatabase{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *ShardingDatabase) Default() { + shardingdatabaselog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. + if r.Spec.GsmDevMode != "" { + r.Spec.GsmDevMode = "dev" + } + + if r.Spec.IsTdeWallet == "" { + r.Spec.IsTdeWallet = "disable" + } + for pindex := range r.Spec.Shard { + if strings.ToLower(r.Spec.Shard[pindex].IsDelete) == "" { + r.Spec.Shard[pindex].IsDelete = "disable" + } + } + +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v4-shardingdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=shardingdatabases,versions=v4,name=vshardingdatabasev4.kb.io,admissionReviewVersions={v1} + +var _ webhook.Validator = &ShardingDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateCreate() (admission.Warnings, error) { + shardingdatabaselog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. 
+ // Check Secret configuration + var validationErr field.ErrorList + var validationErrs1 field.ErrorList + + //namespaces := db.GetWatchNamespaces() + //_, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + // if len(namespaces) != 0 && !containsNamespace { + // validationErr = append(validationErr, + // field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + // "Oracle database operator doesn't watch over this namespace")) + //} + + if r.Spec.DbSecret == nil { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret"), r.Spec.DbSecret, + "DbSecret cannot be set to nil")) + } else { + if len(r.Spec.DbSecret.Name) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("Name"), r.Spec.DbSecret.Name, + "Secret name cannot be set empty")) + } + if len(r.Spec.DbSecret.PwdFileName) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileName"), r.Spec.DbSecret.PwdFileName, + "Password file name cannot be set empty")) + } + if strings.ToLower(r.Spec.DbSecret.EncryptionType) != "base64" { + if strings.ToLower(r.Spec.DbSecret.KeyFileName) == "" { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileName"), r.Spec.DbSecret.KeyFileName, + "Key file name cannot be empty")) + } + } + + /** + if len(r.Spec.DbSecret.PwdFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileMountLocation"), r.Spec.DbSecret.PwdFileMountLocation, + "Password file mount location cannot be empty")) + } + + if len(r.Spec.DbSecret.KeyFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileMountLocation"), r.Spec.DbSecret.KeyFileMountLocation, + 
"KeyFileMountLocation file mount location cannot be empty")) + } + **/ + } + + if r.Spec.IsTdeWallet == "enable" { + if (len(r.Spec.FssStorageClass) == 0) && (len(r.Spec.TdeWalletPvc) == 0) { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("FssStorageClass"), r.Spec.FssStorageClass, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("TdeWalletPvc"), r.Spec.TdeWalletPvc, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + } + } + + if r.Spec.IsTdeWallet != "" { + if (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "disable") { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("isTdeWallet"), r.Spec.IsTdeWallet, + "isTdeWallet can be set to only \"enable\" or \"disable\"")) + } + } + + validationErrs1 = r.validateShardIsDelete() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateFreeEdition() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateCatalogName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateShardName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + // TODO(user): fill in your validation logic upon object creation. 
+ if len(validationErr) == 0 { + return nil, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "ShardingDatabase"}, + r.Name, validationErr) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + shardingdatabaselog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateDelete() (admission.Warnings, error) { + shardingdatabaselog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} + +// ###### Vlaidation Block ################# + +func (r *ShardingDatabase) validateShardIsDelete() field.ErrorList { + + var validationErrs field.ErrorList + + for pindex := range r.Spec.Shard { + if (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "disable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "failed") { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("shard").Child("isDelete"), r.Spec.Shard[pindex].IsDelete, + "r.Spec.Shard[pindex].IsDelete can be set to only enable|disable|failed")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateFreeEdition() field.ErrorList { + + var validationErrs field.ErrorList + if strings.ToLower(r.Spec.DbEdition) == "free" { + // Shard Spec Checks + for i := 0; i < len(r.Spec.Shard); i++ { + for index, variable := range r.Spec.Shard[i].EnvVars { + if variable.Name == "ORACLE_SID" { + if strings.ToLower(variable.Value) != "free" { + 
validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_PDB value can only be set to freepdb")) + } + } + } + } + // Catalog Spec Checks + for i := 0; i < len(r.Spec.Catalog); i++ { + for index, variable := range r.Spec.Catalog[i].EnvVars { + if variable.Name == "ORACLE_SID" { + if strings.ToLower(variable.Value) != "free" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_PDB value can only be set to freepdb")) + } + } + } + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateShardName() field.ErrorList { + var validationErrs field.ErrorList + + for pindex := range r.Spec.Shard { + if len(r.Spec.Shard[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("shard").Child("Name"), r.Spec.Shard[pindex].Name, + "Shard Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateCatalogName() field.ErrorList { + var 
validationErrs field.ErrorList + + for pindex := range r.Spec.Catalog { + if len(r.Spec.Catalog[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("catalog").Child("Name"), r.Spec.Catalog[pindex].Name, + "Catalog Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} diff --git a/apis/database/v4/singleinstancedatabase_conversion.go b/apis/database/v4/singleinstancedatabase_conversion.go new file mode 100644 index 00000000..93638482 --- /dev/null +++ b/apis/database/v4/singleinstancedatabase_conversion.go @@ -0,0 +1,4 @@ +package v4 + +// Hub defines v1 as the hub version +func (*SingleInstanceDatabase) Hub() {} diff --git a/apis/database/v4/singleinstancedatabase_types.go b/apis/database/v4/singleinstancedatabase_types.go new file mode 100644 index 00000000..4f4836d7 --- /dev/null +++ b/apis/database/v4/singleinstancedatabase_types.go @@ -0,0 +1,231 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase +type SingleInstanceDatabaseSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:Enum=standard;enterprise;express;free + Edition string `json:"edition,omitempty"` + + // SID must be alphanumeric (no special characters, only a-z, A-Z, 0-9), and no longer than 12 characters. + // +k8s:openapi-gen=true + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]+$` + // +kubebuilder:validation:MaxLength:=12 + Sid string `json:"sid,omitempty"` + Charset string `json:"charset,omitempty"` + Pdbname string `json:"pdbName,omitempty"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ListenerPort int `json:"listenerPort,omitempty"` + TcpsListenerPort int `json:"tcpsListenerPort,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + FlashBack *bool `json:"flashBack,omitempty"` + ArchiveLog *bool `json:"archiveLog,omitempty"` + ForceLogging *bool `json:"forceLog,omitempty"` + EnableTCPS bool `json:"enableTCPS,omitempty"` + TcpsCertRenewInterval string `json:"tcpsCertRenewInterval,omitempty"` + TcpsTlsSecret string `json:"tcpsTlsSecret,omitempty"` + + PrimaryDatabaseRef string `json:"primaryDatabaseRef,omitempty"` + // +kubebuilder:validation:Enum=primary;standby;clone;truecache + CreateAs string `json:"createAs,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + TrueCacheServices []string `json:"trueCacheServices,omitempty"` + + // +k8s:openapi-gen=true + Replicas int `json:"replicas,omitempty"` + + 
NodeSelector map[string]string `json:"nodeSelector,omitempty"` + AdminPassword SingleInstanceDatabaseAdminPassword `json:"adminPassword,omitempty"` + Image SingleInstanceDatabaseImage `json:"image"` + Persistence SingleInstanceDatabasePersistence `json:"persistence,omitempty"` + InitParams *SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` + Resources SingleInstanceDatabaseResources `json:"resources,omitempty"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` +} + +type SingleInstanceDatabaseResource struct { + Cpu string `json:"cpu,omitempty"` + Memory string `json:"memory,omitempty"` +} + +type SingleInstanceDatabaseResources struct { + Requests *SingleInstanceDatabaseResource `json:"requests,omitempty"` + Limits *SingleInstanceDatabaseResource `json:"limits,omitempty"` +} + +// SingleInstanceDatabasePersistence defines the storage size and class for PVC +type SingleInstanceDatabasePersistence struct { + Size string `json:"size,omitempty"` + StorageClass string `json:"storageClass,omitempty"` + // +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany + AccessMode string `json:"accessMode,omitempty"` + DatafilesVolumeName string `json:"datafilesVolumeName,omitempty"` + ScriptsVolumeName string `json:"scriptsVolumeName,omitempty"` + VolumeClaimAnnotation string `json:"volumeClaimAnnotation,omitempty"` + SetWritePermissions *bool `json:"setWritePermissions,omitempty"` +} + +// SingleInstanceDatabaseInitParams defines the Init Parameters +type SingleInstanceDatabaseInitParams struct { + SgaTarget int `json:"sgaTarget,omitempty"` + PgaAggregateTarget int `json:"pgaAggregateTarget,omitempty"` + CpuCount int `json:"cpuCount,omitempty"` + Processes int `json:"processes,omitempty"` +} + +// SingleInstanceDatabaseImage defines the Image source and pullSecrets for POD +type SingleInstanceDatabaseImage struct { + Version string `json:"version,omitempty"` + PullFrom string `json:"pullFrom"` + PullSecrets string 
 `json:"pullSecrets,omitempty"` + PrebuiltDB bool `json:"prebuiltDB,omitempty"` +} + +// SingleInstanceDatabaseAdminPassword defines the secret containing Admin Password mapped to secretKey for Database +type SingleInstanceDatabaseAdminPassword struct { + SecretName string `json:"secretName"` + // +kubebuilder:default:="oracle_pwd" + SecretKey string `json:"secretKey,omitempty"` + KeepSecret *bool `json:"keepSecret,omitempty"` +} + +// SingleInstanceDatabaseStatus defines the observed state of SingleInstanceDatabase +type SingleInstanceDatabaseStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + Nodes []string `json:"nodes,omitempty"` + Role string `json:"role,omitempty"` + Status string `json:"status,omitempty"` + Replicas int `json:"replicas,omitempty"` + ReleaseUpdate string `json:"releaseUpdate,omitempty"` + DgBroker *string `json:"dgBroker,omitempty"` + // +kubebuilder:default:="false" + DatafilesPatched string `json:"datafilesPatched,omitempty"` + ConnectString string `json:"connectString,omitempty"` + ClusterConnectString string `json:"clusterConnectString,omitempty"` + TcpsConnectString string `json:"tcpsConnectString,omitempty"` + StandbyDatabases map[string]string `json:"standbyDatabases,omitempty"` + // +kubebuilder:default:="false" + DatafilesCreated string `json:"datafilesCreated,omitempty"` + Sid string `json:"sid,omitempty"` + Edition string `json:"edition,omitempty"` + Charset string `json:"charset,omitempty"` + Pdbname string `json:"pdbName,omitempty"` + InitSgaSize int `json:"initSgaSize,omitempty"` + InitPgaSize int `json:"initPgaSize,omitempty"` + CreatedAs string `json:"createdAs,omitempty"` + FlashBack string `json:"flashBack,omitempty"` + ArchiveLog string `json:"archiveLog,omitempty"` + ForceLogging string `json:"forceLog,omitempty"` + OemExpressUrl string `json:"oemExpressUrl,omitempty"` + OrdsReference string 
`json:"ordsReference,omitempty"` + PdbConnectString string `json:"pdbConnectString,omitempty"` + TcpsPdbConnectString string `json:"tcpsPdbConnectString,omitempty"` + ApexInstalled bool `json:"apexInstalled,omitempty"` + PrebuiltDB bool `json:"prebuiltDB,omitempty"` + // +kubebuilder:default:=false + IsTcpsEnabled bool `json:"isTcpsEnabled"` + CertCreationTimestamp string `json:"certCreationTimestamp,omitempty"` + CertRenewInterval string `json:"certRenewInterval,omitempty"` + ClientWalletLoc string `json:"clientWalletLoc,omitempty"` + PrimaryDatabase string `json:"primaryDatabase,omitempty"` + // +kubebuilder:default:="" + TcpsTlsSecret string `json:"tcpsTlsSecret"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + InitParams SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` + Persistence SingleInstanceDatabasePersistence `json:"persistence"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas +// +kubebuilder:printcolumn:JSONPath=".status.edition",name="Edition",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.sid",name="Sid",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.role",name="Role",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.releaseUpdate",name="Version",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.connectString",name="Connect Str",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.pdbConnectString",name="Pdb Connect Str",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.tcpsConnectString",name="TCPS Connect Str",type="string" 
+// +kubebuilder:printcolumn:JSONPath=".status.tcpsPdbConnectString",name="TCPS Pdb Connect Str",type="string", priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.oemExpressUrl",name="Oem Express Url",type="string" + +// SingleInstanceDatabase is the Schema for the singleinstancedatabases API +// +kubebuilder:storageversion +type SingleInstanceDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SingleInstanceDatabaseSpec `json:"spec,omitempty"` + Status SingleInstanceDatabaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// SingleInstanceDatabaseList contains a list of SingleInstanceDatabase +type SingleInstanceDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SingleInstanceDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&SingleInstanceDatabase{}, &SingleInstanceDatabaseList{}) +} diff --git a/apis/database/v4/singleinstancedatabase_webhook.go b/apis/database/v4/singleinstancedatabase_webhook.go new file mode 100644 index 00000000..b327d7d4 --- /dev/null +++ b/apis/database/v4/singleinstancedatabase_webhook.go @@ -0,0 +1,55 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var singleinstancedatabaselog = logf.Log.WithName("singleinstancedatabase-resource") + +func (r *SingleInstanceDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/apis/database/v4/zz_generated.deepcopy.go b/apis/database/v4/zz_generated.deepcopy.go new file mode 100644 index 00000000..4eb9425d --- /dev/null +++ b/apis/database/v4/zz_generated.deepcopy.go @@ -0,0 +1,4213 @@ +//go:build !ignore_autogenerated + +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + timex "time" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcdSpec) DeepCopyInto(out *AcdSpec) { + *out = *in + in.K8sAcd.DeepCopyInto(&out.K8sAcd) + in.OciAcd.DeepCopyInto(&out.OciAcd) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcdSpec. +func (in *AcdSpec) DeepCopy() *AcdSpec { + if in == nil { + return nil + } + out := new(AcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminpdbPass) DeepCopyInto(out *AdminpdbPass) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminpdbPass. 
+func (in *AdminpdbPass) DeepCopy() *AdminpdbPass { + if in == nil { + return nil + } + out := new(AdminpdbPass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminpdbUser) DeepCopyInto(out *AdminpdbUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminpdbUser. +func (in *AdminpdbUser) DeepCopy() *AdminpdbUser { + if in == nil { + return nil + } + out := new(AdminpdbUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousContainerDatabase) DeepCopyInto(out *AutonomousContainerDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabase. +func (in *AutonomousContainerDatabase) DeepCopy() *AutonomousContainerDatabase { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousContainerDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousContainerDatabaseList) DeepCopyInto(out *AutonomousContainerDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousContainerDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseList. +func (in *AutonomousContainerDatabaseList) DeepCopy() *AutonomousContainerDatabaseList { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousContainerDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousContainerDatabaseSpec) DeepCopyInto(out *AutonomousContainerDatabaseSpec) { + *out = *in + if in.AutonomousContainerDatabaseOCID != nil { + in, out := &in.AutonomousContainerDatabaseOCID, &out.AutonomousContainerDatabaseOCID + *out = new(string) + **out = **in + } + if in.CompartmentOCID != nil { + in, out := &in.CompartmentOCID, &out.CompartmentOCID + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.AutonomousExadataVMClusterOCID != nil { + in, out := &in.AutonomousExadataVMClusterOCID, &out.AutonomousExadataVMClusterOCID + *out = new(string) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.OCIConfig.DeepCopyInto(&out.OCIConfig) + if in.HardLink != nil { + in, out := &in.HardLink, &out.HardLink + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseSpec. +func (in *AutonomousContainerDatabaseSpec) DeepCopy() *AutonomousContainerDatabaseSpec { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousContainerDatabaseStatus) DeepCopyInto(out *AutonomousContainerDatabaseStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseStatus. 
+func (in *AutonomousContainerDatabaseStatus) DeepCopy() *AutonomousContainerDatabaseStatus { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabase) DeepCopyInto(out *AutonomousDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabase. +func (in *AutonomousDatabase) DeepCopy() *AutonomousDatabase { + if in == nil { + return nil + } + out := new(AutonomousDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackup) DeepCopyInto(out *AutonomousDatabaseBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackup. +func (in *AutonomousDatabaseBackup) DeepCopy() *AutonomousDatabaseBackup { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AutonomousDatabaseBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackupList) DeepCopyInto(out *AutonomousDatabaseBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousDatabaseBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupList. +func (in *AutonomousDatabaseBackupList) DeepCopy() *AutonomousDatabaseBackupList { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabaseBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseBackupSpec) DeepCopyInto(out *AutonomousDatabaseBackupSpec) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.AutonomousDatabaseBackupOCID != nil { + in, out := &in.AutonomousDatabaseBackupOCID, &out.AutonomousDatabaseBackupOCID + *out = new(string) + **out = **in + } + if in.IsLongTermBackup != nil { + in, out := &in.IsLongTermBackup, &out.IsLongTermBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodInDays != nil { + in, out := &in.RetentionPeriodInDays, &out.RetentionPeriodInDays + *out = new(int) + **out = **in + } + in.OCIConfig.DeepCopyInto(&out.OCIConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupSpec. +func (in *AutonomousDatabaseBackupSpec) DeepCopy() *AutonomousDatabaseBackupSpec { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackupStatus) DeepCopyInto(out *AutonomousDatabaseBackupStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupStatus. +func (in *AutonomousDatabaseBackupStatus) DeepCopy() *AutonomousDatabaseBackupStatus { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseBase) DeepCopyInto(out *AutonomousDatabaseBase) { + *out = *in + if in.CompartmentId != nil { + in, out := &in.CompartmentId, &out.CompartmentId + *out = new(string) + **out = **in + } + in.AutonomousContainerDatabase.DeepCopyInto(&out.AutonomousContainerDatabase) + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DbName != nil { + in, out := &in.DbName, &out.DbName + *out = new(string) + **out = **in + } + if in.DbVersion != nil { + in, out := &in.DbVersion, &out.DbVersion + *out = new(string) + **out = **in + } + if in.DataStorageSizeInTBs != nil { + in, out := &in.DataStorageSizeInTBs, &out.DataStorageSizeInTBs + *out = new(int) + **out = **in + } + if in.CpuCoreCount != nil { + in, out := &in.CpuCoreCount, &out.CpuCoreCount + *out = new(int) + **out = **in + } + if in.ComputeCount != nil { + in, out := &in.ComputeCount, &out.ComputeCount + *out = new(float32) + **out = **in + } + if in.OcpuCount != nil { + in, out := &in.OcpuCount, &out.OcpuCount + *out = new(float32) + **out = **in + } + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + if in.IsAutoScalingEnabled != nil { + in, out := &in.IsAutoScalingEnabled, &out.IsAutoScalingEnabled + *out = new(bool) + **out = **in + } + if in.IsDedicated != nil { + in, out := &in.IsDedicated, &out.IsDedicated + *out = new(bool) + **out = **in + } + if in.IsFreeTier != nil { + in, out := &in.IsFreeTier, &out.IsFreeTier + *out = new(bool) + **out = **in + } + if in.IsAccessControlEnabled != nil { + in, out := &in.IsAccessControlEnabled, &out.IsAccessControlEnabled + *out = new(bool) + **out = **in + } + if in.WhitelistedIps != nil { + in, out := &in.WhitelistedIps, &out.WhitelistedIps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SubnetId != nil { + in, out := &in.SubnetId, &out.SubnetId + *out = new(string) + **out = **in + } + if in.NsgIds != nil { + in, out := &in.NsgIds, &out.NsgIds + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.PrivateEndpointLabel != nil { + in, out := &in.PrivateEndpointLabel, &out.PrivateEndpointLabel + *out = new(string) + **out = **in + } + if in.IsMtlsConnectionRequired != nil { + in, out := &in.IsMtlsConnectionRequired, &out.IsMtlsConnectionRequired + *out = new(bool) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBase. +func (in *AutonomousDatabaseBase) DeepCopy() *AutonomousDatabaseBase { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseClone) DeepCopyInto(out *AutonomousDatabaseClone) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseClone. +func (in *AutonomousDatabaseClone) DeepCopy() *AutonomousDatabaseClone { + if in == nil { + return nil + } + out := new(AutonomousDatabaseClone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseDetails. 
// NOTE(review): autogenerated deepcopy code — regenerate rather than hand-edit.
func (in *AutonomousDatabaseDetails) DeepCopy() *AutonomousDatabaseDetails {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseDetails)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutonomousDatabaseList) DeepCopyInto(out *AutonomousDatabaseList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]AutonomousDatabase, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseList.
func (in *AutonomousDatabaseList) DeepCopy() *AutonomousDatabaseList {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AutonomousDatabaseList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutonomousDatabaseRestore) DeepCopyInto(out *AutonomousDatabaseRestore) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestore.
func (in *AutonomousDatabaseRestore) DeepCopy() *AutonomousDatabaseRestore {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseRestore)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AutonomousDatabaseRestore) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutonomousDatabaseRestoreList) DeepCopyInto(out *AutonomousDatabaseRestoreList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]AutonomousDatabaseRestore, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreList.
func (in *AutonomousDatabaseRestoreList) DeepCopy() *AutonomousDatabaseRestoreList {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseRestoreList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AutonomousDatabaseRestoreList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutonomousDatabaseRestoreSpec) DeepCopyInto(out *AutonomousDatabaseRestoreSpec) {
    *out = *in
    in.Target.DeepCopyInto(&out.Target)
    in.Source.DeepCopyInto(&out.Source)
    in.OCIConfig.DeepCopyInto(&out.OCIConfig)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreSpec.
func (in *AutonomousDatabaseRestoreSpec) DeepCopy() *AutonomousDatabaseRestoreSpec {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseRestoreSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutonomousDatabaseRestoreStatus) DeepCopyInto(out *AutonomousDatabaseRestoreStatus) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreStatus.
func (in *AutonomousDatabaseRestoreStatus) DeepCopy() *AutonomousDatabaseRestoreStatus {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseRestoreStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutonomousDatabaseSpec) DeepCopyInto(out *AutonomousDatabaseSpec) {
    *out = *in
    in.Details.DeepCopyInto(&out.Details)
    in.Clone.DeepCopyInto(&out.Clone)
    in.Wallet.DeepCopyInto(&out.Wallet)
    in.OciConfig.DeepCopyInto(&out.OciConfig)
    if in.HardLink != nil {
        in, out := &in.HardLink, &out.HardLink
        *out = new(bool)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseSpec.
func (in *AutonomousDatabaseSpec) DeepCopy() *AutonomousDatabaseSpec {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutonomousDatabaseStatus) DeepCopyInto(out *AutonomousDatabaseStatus) {
    *out = *in
    if in.AllConnectionStrings != nil {
        in, out := &in.AllConnectionStrings, &out.AllConnectionStrings
        *out = make([]ConnectionStringProfile, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.Conditions != nil {
        in, out := &in.Conditions, &out.Conditions
        *out = make([]v1.Condition, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseStatus.
// NOTE(review): autogenerated deepcopy code — regenerate rather than hand-edit.
func (in *AutonomousDatabaseStatus) DeepCopy() *AutonomousDatabaseStatus {
    if in == nil {
        return nil
    }
    out := new(AutonomousDatabaseStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backupconfig) DeepCopyInto(out *Backupconfig) {
    *out = *in
    if in.AutoBackupEnabled != nil {
        in, out := &in.AutoBackupEnabled, &out.AutoBackupEnabled
        *out = new(bool)
        **out = **in
    }
    if in.RecoveryWindowsInDays != nil {
        in, out := &in.RecoveryWindowsInDays, &out.RecoveryWindowsInDays
        *out = new(int)
        **out = **in
    }
    if in.AutoBackupWindow != nil {
        in, out := &in.AutoBackupWindow, &out.AutoBackupWindow
        *out = new(string)
        **out = **in
    }
    if in.BackupDestinationDetails != nil {
        in, out := &in.BackupDestinationDetails, &out.BackupDestinationDetails
        *out = new(string)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backupconfig.
func (in *Backupconfig) DeepCopy() *Backupconfig {
    if in == nil {
        return nil
    }
    out := new(Backupconfig)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDB) DeepCopyInto(out *CDB) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDB.
func (in *CDB) DeepCopy() *CDB {
    if in == nil {
        return nil
    }
    out := new(CDB)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CDB) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBAdminPassword) DeepCopyInto(out *CDBAdminPassword) {
    *out = *in
    out.Secret = in.Secret
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminPassword.
func (in *CDBAdminPassword) DeepCopy() *CDBAdminPassword {
    if in == nil {
        return nil
    }
    out := new(CDBAdminPassword)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBAdminUser) DeepCopyInto(out *CDBAdminUser) {
    *out = *in
    out.Secret = in.Secret
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminUser.
func (in *CDBAdminUser) DeepCopy() *CDBAdminUser {
    if in == nil {
        return nil
    }
    out := new(CDBAdminUser)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBList) DeepCopyInto(out *CDBList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]CDB, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBList.
func (in *CDBList) DeepCopy() *CDBList {
    if in == nil {
        return nil
    }
    out := new(CDBList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CDBList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBPRIVKEY) DeepCopyInto(out *CDBPRIVKEY) {
    *out = *in
    out.Secret = in.Secret
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPRIVKEY.
func (in *CDBPRIVKEY) DeepCopy() *CDBPRIVKEY {
    if in == nil {
        return nil
    }
    out := new(CDBPRIVKEY)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBPUBKEY) DeepCopyInto(out *CDBPUBKEY) {
    *out = *in
    out.Secret = in.Secret
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPUBKEY.
func (in *CDBPUBKEY) DeepCopy() *CDBPUBKEY {
    if in == nil {
        return nil
    }
    out := new(CDBPUBKEY)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBSecret) DeepCopyInto(out *CDBSecret) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSecret.
func (in *CDBSecret) DeepCopy() *CDBSecret {
    if in == nil {
        return nil
    }
    out := new(CDBSecret)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBSpec) DeepCopyInto(out *CDBSpec) {
    *out = *in
    out.SysAdminPwd = in.SysAdminPwd
    out.CDBAdminUser = in.CDBAdminUser
    out.CDBAdminPwd = in.CDBAdminPwd
    out.CDBTlsKey = in.CDBTlsKey
    out.CDBTlsCrt = in.CDBTlsCrt
    out.ORDSPwd = in.ORDSPwd
    out.WebServerUser = in.WebServerUser
    out.WebServerPwd = in.WebServerPwd
    if in.NodeSelector != nil {
        in, out := &in.NodeSelector, &out.NodeSelector
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    out.CDBPubKey = in.CDBPubKey
    out.CDBPriKey = in.CDBPriKey
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSpec.
func (in *CDBSpec) DeepCopy() *CDBSpec {
    if in == nil {
        return nil
    }
    out := new(CDBSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBStatus) DeepCopyInto(out *CDBStatus) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBStatus.
func (in *CDBStatus) DeepCopy() *CDBStatus {
    if in == nil {
        return nil
    }
    out := new(CDBStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBSysAdminPassword) DeepCopyInto(out *CDBSysAdminPassword) {
    *out = *in
    out.Secret = in.Secret
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSysAdminPassword.
func (in *CDBSysAdminPassword) DeepCopy() *CDBSysAdminPassword {
    if in == nil {
        return nil
    }
    out := new(CDBSysAdminPassword)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate rather than hand-edit.
func (in *CDBTLSCRT) DeepCopyInto(out *CDBTLSCRT) {
    *out = *in
    out.Secret = in.Secret
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSCRT.
func (in *CDBTLSCRT) DeepCopy() *CDBTLSCRT {
    if in == nil {
        return nil
    }
    out := new(CDBTLSCRT)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDBTLSKEY) DeepCopyInto(out *CDBTLSKEY) {
    *out = *in
    out.Secret = in.Secret
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSKEY.
func (in *CDBTLSKEY) DeepCopy() *CDBTLSKEY {
    if in == nil {
        return nil
    }
    out := new(CDBTLSKEY)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) {
    *out = *in
    if in.EnvVars != nil {
        in, out := &in.EnvVars, &out.EnvVars
        *out = make([]EnvironmentVariable, len(*in))
        copy(*out, *in)
    }
    if in.Resources != nil {
        in, out := &in.Resources, &out.Resources
        *out = new(corev1.ResourceRequirements)
        (*in).DeepCopyInto(*out)
    }
    if in.NodeSelector != nil {
        in, out := &in.NodeSelector, &out.NodeSelector
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.PvAnnotations != nil {
        in, out := &in.PvAnnotations, &out.PvAnnotations
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.PvMatchLabels != nil {
        in, out := &in.PvMatchLabels, &out.PvMatchLabels
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.ImagePulllPolicy != nil {
        in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy
        *out = new(corev1.PullPolicy)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSpec.
func (in *CatalogSpec) DeepCopy() *CatalogSpec {
    if in == nil {
        return nil
    }
    out := new(CatalogSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSecret) DeepCopyInto(out *CertificateSecret) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSecret.
func (in *CertificateSecret) DeepCopy() *CertificateSecret {
    if in == nil {
        return nil
    }
    out := new(CertificateSecret)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionStringProfile) DeepCopyInto(out *ConnectionStringProfile) {
    *out = *in
    if in.ConnectionStrings != nil {
        in, out := &in.ConnectionStrings, &out.ConnectionStrings
        *out = make([]ConnectionStringSpec, len(*in))
        copy(*out, *in)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringProfile.
func (in *ConnectionStringProfile) DeepCopy() *ConnectionStringProfile {
    if in == nil {
        return nil
    }
    out := new(ConnectionStringProfile)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionStringSpec) DeepCopyInto(out *ConnectionStringSpec) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringSpec.
func (in *ConnectionStringSpec) DeepCopy() *ConnectionStringSpec {
    if in == nil {
        return nil
    }
    out := new(ConnectionStringSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DBWalletSecret) DeepCopyInto(out *DBWalletSecret) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBWalletSecret.
func (in *DBWalletSecret) DeepCopy() *DBWalletSecret {
    if in == nil {
        return nil
    }
    out := new(DBWalletSecret)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataguardBroker) DeepCopyInto(out *DataguardBroker) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBroker.
func (in *DataguardBroker) DeepCopy() *DataguardBroker {
    if in == nil {
        return nil
    }
    out := new(DataguardBroker)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataguardBroker) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataguardBrokerList) DeepCopyInto(out *DataguardBrokerList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]DataguardBroker, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerList.
func (in *DataguardBrokerList) DeepCopy() *DataguardBrokerList {
    if in == nil {
        return nil
    }
    out := new(DataguardBrokerList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataguardBrokerList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataguardBrokerSpec) DeepCopyInto(out *DataguardBrokerSpec) {
    *out = *in
    if in.StandbyDatabaseRefs != nil {
        in, out := &in.StandbyDatabaseRefs, &out.StandbyDatabaseRefs
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.ServiceAnnotations != nil {
        in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.NodeSelector != nil {
        in, out := &in.NodeSelector, &out.NodeSelector
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerSpec.
func (in *DataguardBrokerSpec) DeepCopy() *DataguardBrokerSpec {
    if in == nil {
        return nil
    }
    out := new(DataguardBrokerSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataguardBrokerStatus) DeepCopyInto(out *DataguardBrokerStatus) {
    *out = *in
    if in.DatabasesInDataguardConfig != nil {
        in, out := &in.DatabasesInDataguardConfig, &out.DatabasesInDataguardConfig
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerStatus.
// NOTE(review): autogenerated deepcopy code — regenerate rather than hand-edit.
func (in *DataguardBrokerStatus) DeepCopy() *DataguardBrokerStatus {
    if in == nil {
        return nil
    }
    out := new(DataguardBrokerStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbCloneConfig) DeepCopyInto(out *DbCloneConfig) {
    *out = *in
    if in.SshPublicKeys != nil {
        in, out := &in.SshPublicKeys, &out.SshPublicKeys
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneConfig.
func (in *DbCloneConfig) DeepCopy() *DbCloneConfig {
    if in == nil {
        return nil
    }
    out := new(DbCloneConfig)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbCloneStatus) DeepCopyInto(out *DbCloneStatus) {
    *out = *in
    if in.Id != nil {
        in, out := &in.Id, &out.Id
        *out = new(string)
        **out = **in
    }
    if in.SshPublicKeys != nil {
        in, out := &in.SshPublicKeys, &out.SshPublicKeys
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneStatus.
func (in *DbCloneStatus) DeepCopy() *DbCloneStatus {
    if in == nil {
        return nil
    }
    out := new(DbCloneStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbStatus) DeepCopyInto(out *DbStatus) {
    *out = *in
    if in.Id != nil {
        in, out := &in.Id, &out.Id
        *out = new(string)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbStatus.
func (in *DbStatus) DeepCopy() *DbStatus {
    if in == nil {
        return nil
    }
    out := new(DbStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbSystemDetails) DeepCopyInto(out *DbSystemDetails) {
    *out = *in
    if in.SshPublicKeys != nil {
        in, out := &in.SshPublicKeys, &out.SshPublicKeys
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.FaultDomains != nil {
        in, out := &in.FaultDomains, &out.FaultDomains
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.NodeCount != nil {
        in, out := &in.NodeCount, &out.NodeCount
        *out = new(int)
        **out = **in
    }
    if in.Tags != nil {
        in, out := &in.Tags, &out.Tags
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    in.DbBackupConfig.DeepCopyInto(&out.DbBackupConfig)
    out.KMSConfig = in.KMSConfig
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbSystemDetails.
func (in *DbSystemDetails) DeepCopy() *DbSystemDetails {
    if in == nil {
        return nil
    }
    out := new(DbSystemDetails)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbWorkrequests) DeepCopyInto(out *DbWorkrequests) {
    *out = *in
    if in.OperationType != nil {
        in, out := &in.OperationType, &out.OperationType
        *out = new(string)
        **out = **in
    }
    if in.OperationId != nil {
        in, out := &in.OperationId, &out.OperationId
        *out = new(string)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbWorkrequests.
func (in *DbWorkrequests) DeepCopy() *DbWorkrequests {
    if in == nil {
        return nil
    }
    out := new(DbWorkrequests)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbcsSystem) DeepCopyInto(out *DbcsSystem) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystem.
func (in *DbcsSystem) DeepCopy() *DbcsSystem {
    if in == nil {
        return nil
    }
    out := new(DbcsSystem)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DbcsSystem) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbcsSystemList) DeepCopyInto(out *DbcsSystemList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]DbcsSystem, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemList.
func (in *DbcsSystemList) DeepCopy() *DbcsSystemList {
    if in == nil {
        return nil
    }
    out := new(DbcsSystemList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DbcsSystemList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbcsSystemSpec) DeepCopyInto(out *DbcsSystemSpec) {
    *out = *in
    in.DbSystem.DeepCopyInto(&out.DbSystem)
    if in.Id != nil {
        in, out := &in.Id, &out.Id
        *out = new(string)
        **out = **in
    }
    if in.OCIConfigMap != nil {
        in, out := &in.OCIConfigMap, &out.OCIConfigMap
        *out = new(string)
        **out = **in
    }
    if in.OCISecret != nil {
        in, out := &in.OCISecret, &out.OCISecret
        *out = new(string)
        **out = **in
    }
    if in.DbClone != nil {
        in, out := &in.DbClone, &out.DbClone
        *out = new(DbCloneConfig)
        (*in).DeepCopyInto(*out)
    }
    if in.PdbConfigs != nil {
        in, out := &in.PdbConfigs, &out.PdbConfigs
        *out = make([]PDBConfig, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.DbBackupId != nil {
        in, out := &in.DbBackupId, &out.DbBackupId
        *out = new(string)
        **out = **in
    }
    if in.DatabaseId != nil {
        in, out := &in.DatabaseId, &out.DatabaseId
        *out = new(string)
        **out = **in
    }
    out.KMSConfig = in.KMSConfig
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemSpec.
func (in *DbcsSystemSpec) DeepCopy() *DbcsSystemSpec {
    if in == nil {
        return nil
    }
    out := new(DbcsSystemSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DbcsSystemStatus) DeepCopyInto(out *DbcsSystemStatus) {
    *out = *in
    if in.Id != nil {
        in, out := &in.Id, &out.Id
        *out = new(string)
        **out = **in
    }
    if in.DataStoragePercentage != nil {
        in, out := &in.DataStoragePercentage, &out.DataStoragePercentage
        *out = new(int)
        **out = **in
    }
    if in.DataStorageSizeInGBs != nil {
        in, out := &in.DataStorageSizeInGBs, &out.DataStorageSizeInGBs
        *out = new(int)
        **out = **in
    }
    if in.RecoStorageSizeInGB != nil {
        in, out := &in.RecoStorageSizeInGB, &out.RecoStorageSizeInGB
        *out = new(int)
        **out = **in
    }
    if in.Shape != nil {
        in, out := &in.Shape, &out.Shape
        *out = new(string)
        **out = **in
    }
    if in.DbInfo != nil {
        in, out := &in.DbInfo, &out.DbInfo
        *out = make([]DbStatus, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    in.Network.DeepCopyInto(&out.Network)
    if in.WorkRequests != nil {
        in, out := &in.WorkRequests, &out.WorkRequests
        *out = make([]DbWorkrequests, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    out.KMSDetailsStatus = in.KMSDetailsStatus
    in.DbCloneStatus.DeepCopyInto(&out.DbCloneStatus)
    if in.PdbDetailsStatus != nil {
        in, out := &in.PdbDetailsStatus, &out.PdbDetailsStatus
        *out = make([]PDBDetailsStatus, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemStatus.
func (in *DbcsSystemStatus) DeepCopy() *DbcsSystemStatus {
    if in == nil {
        return nil
    }
    out := new(DbcsSystemStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvironmentVariable) DeepCopyInto(out *EnvironmentVariable) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentVariable.
// NOTE(review): autogenerated deepcopy code — regenerate rather than hand-edit.
func (in *EnvironmentVariable) DeepCopy() *EnvironmentVariable {
    if in == nil {
        return nil
    }
    out := new(EnvironmentVariable)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GlobalSettings) DeepCopyInto(out *GlobalSettings) {
    *out = *in
    if in.CacheMetadataEnabled != nil {
        in, out := &in.CacheMetadataEnabled, &out.CacheMetadataEnabled
        *out = new(bool)
        **out = **in
    }
    if in.CacheMetadataGraphQLExpireAfterAccess != nil {
        in, out := &in.CacheMetadataGraphQLExpireAfterAccess, &out.CacheMetadataGraphQLExpireAfterAccess
        *out = new(timex.Duration)
        **out = **in
    }
    if in.CacheMetadataGraphQLExpireAfterWrite != nil {
        in, out := &in.CacheMetadataGraphQLExpireAfterWrite, &out.CacheMetadataGraphQLExpireAfterWrite
        *out = new(timex.Duration)
        **out = **in
    }
    if in.CacheMetadataTimeout != nil {
        in, out := &in.CacheMetadataTimeout, &out.CacheMetadataTimeout
        *out = new(timex.Duration)
        **out = **in
    }
    if in.CacheMetadataJWKSEnabled != nil {
        in, out := &in.CacheMetadataJWKSEnabled, &out.CacheMetadataJWKSEnabled
        *out = new(bool)
        **out = **in
    }
    if in.CacheMetadataJWKSInitialCapacity != nil {
        in, out := &in.CacheMetadataJWKSInitialCapacity, &out.CacheMetadataJWKSInitialCapacity
        *out = new(int32)
        **out = **in
    }
    if in.CacheMetadataJWKSMaximumSize != nil {
        in, out := &in.CacheMetadataJWKSMaximumSize, &out.CacheMetadataJWKSMaximumSize
        *out = new(int32)
        **out = **in
    }
    if in.CacheMetadataJWKSExpireAfterAccess != nil {
        in, out := &in.CacheMetadataJWKSExpireAfterAccess, &out.CacheMetadataJWKSExpireAfterAccess
        *out = new(timex.Duration)
        **out = **in
    }
    if in.CacheMetadataJWKSExpireAfterWrite != nil {
        in, out := &in.CacheMetadataJWKSExpireAfterWrite, &out.CacheMetadataJWKSExpireAfterWrite
        *out = new(timex.Duration)
        **out = **in
    }
    if in.DatabaseAPIEnabled != nil {
        in, out := &in.DatabaseAPIEnabled, &out.DatabaseAPIEnabled
        *out = new(bool)
        **out = **in
    }
    if in.DatabaseAPIManagementServicesDisabled != nil {
        in, out := &in.DatabaseAPIManagementServicesDisabled, &out.DatabaseAPIManagementServicesDisabled
        *out = new(bool)
        **out = **in
    }
    if in.DBInvalidPoolTimeout != nil {
        in, out := &in.DBInvalidPoolTimeout, &out.DBInvalidPoolTimeout
        *out = new(timex.Duration)
        **out = **in
    }
    if in.FeatureGraphQLMaxNestingDepth != nil {
        in, out := &in.FeatureGraphQLMaxNestingDepth, &out.FeatureGraphQLMaxNestingDepth
        *out = new(int32)
        **out = **in
    }
    if in.SecurityCredentialsAttempts != nil {
        in, out := &in.SecurityCredentialsAttempts, &out.SecurityCredentialsAttempts
        *out = new(int32)
        **out = **in
    }
    if in.SecurityCredentialsLockTime != nil {
        in, out := &in.SecurityCredentialsLockTime, &out.SecurityCredentialsLockTime
        *out = new(timex.Duration)
        **out = **in
    }
    if in.StandaloneHTTPPort != nil {
        in, out := &in.StandaloneHTTPPort, &out.StandaloneHTTPPort
        *out = new(int32)
        **out = **in
    }
    if in.StandaloneHTTPSPort != nil {
        in, out := &in.StandaloneHTTPSPort, &out.StandaloneHTTPSPort
        *out = new(int32)
        **out = **in
    }
    if in.StandaloneStopTimeout != nil {
        in, out := &in.StandaloneStopTimeout, &out.StandaloneStopTimeout
        *out = new(timex.Duration)
        **out = **in
    }
    if in.DebugPrintDebugToScreen != nil {
        in, out := &in.DebugPrintDebugToScreen, &out.DebugPrintDebugToScreen
        *out = new(bool)
        **out = **in
    }
    if in.ICAPPort != nil {
        in, out := &in.ICAPPort, &out.ICAPPort
        *out = new(int32)
        **out = **in
    }
    if in.ICAPSecurePort != nil {
        in, out := &in.ICAPSecurePort, &out.ICAPSecurePort
        *out = new(int32)
        **out = **in
    }
    if in.MongoPort != nil {
        in, out := &in.MongoPort, &out.MongoPort
        *out = new(int32)
        **out = **in
    }
    if in.MongoIdleTimeout != nil {
        in, out := &in.MongoIdleTimeout, &out.MongoIdleTimeout
        *out = new(timex.Duration)
        **out = **in
    }
    if in.MongoOpTimeout != nil {
        in, out := &in.MongoOpTimeout, &out.MongoOpTimeout
        *out = new(timex.Duration)
        **out = **in
    }
    if in.SecurityDisableDefaultExclusionList != nil {
        in, out := &in.SecurityDisableDefaultExclusionList, &out.SecurityDisableDefaultExclusionList
        *out = new(bool)
        **out = **in
    }
    if in.SecurityMaxEntries != nil {
        in, out := &in.SecurityMaxEntries, &out.SecurityMaxEntries
        *out = new(int32)
        **out = **in
    }
    if in.SecurityVerifySSL != nil {
        in, out := &in.SecurityVerifySSL, &out.SecurityVerifySSL
        *out = new(bool)
        **out = **in
    }
    if in.CertSecret != nil {
        in, out := &in.CertSecret, &out.CertSecret
        *out = new(CertificateSecret)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSettings.
func (in *GlobalSettings) DeepCopy() *GlobalSettings {
    if in == nil {
        return nil
    }
    out := new(GlobalSettings)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GsmServiceSpec) DeepCopyInto(out *GsmServiceSpec) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmServiceSpec.
func (in *GsmServiceSpec) DeepCopy() *GsmServiceSpec {
    if in == nil {
        return nil
    }
    out := new(GsmServiceSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GsmShardDetails) DeepCopyInto(out *GsmShardDetails) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardDetails.
func (in *GsmShardDetails) DeepCopy() *GsmShardDetails {
    if in == nil {
        return nil
    }
    out := new(GsmShardDetails)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GsmShardGroupSpec) DeepCopyInto(out *GsmShardGroupSpec) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardGroupSpec.
func (in *GsmShardGroupSpec) DeepCopy() *GsmShardGroupSpec {
    if in == nil {
        return nil
    }
    out := new(GsmShardGroupSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GsmShardSpaceSpec) DeepCopyInto(out *GsmShardSpaceSpec) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardSpaceSpec.
func (in *GsmShardSpaceSpec) DeepCopy() *GsmShardSpaceSpec {
    if in == nil {
        return nil
    }
    out := new(GsmShardSpaceSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GsmSpec) DeepCopyInto(out *GsmSpec) { + *out = *in + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make([]EnvironmentVariable, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvMatchLabels != nil { + in, out := &in.PvMatchLabels, &out.PvMatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ImagePulllPolicy != nil { + in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy + *out = new(corev1.PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmSpec. +func (in *GsmSpec) DeepCopy() *GsmSpec { + if in == nil { + return nil + } + out := new(GsmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmStatus) DeepCopyInto(out *GsmStatus) { + *out = *in + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatus. 
+func (in *GsmStatus) DeepCopy() *GsmStatus { + if in == nil { + return nil + } + out := new(GsmStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmStatusDetails) DeepCopyInto(out *GsmStatusDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatusDetails. +func (in *GsmStatusDetails) DeepCopy() *GsmStatusDetails { + if in == nil { + return nil + } + out := new(GsmStatusDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sADBBackupSpec) DeepCopyInto(out *K8sADBBackupSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sADBBackupSpec. +func (in *K8sADBBackupSpec) DeepCopy() *K8sADBBackupSpec { + if in == nil { + return nil + } + out := new(K8sADBBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sAcdSpec) DeepCopyInto(out *K8sAcdSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAcdSpec. +func (in *K8sAcdSpec) DeepCopy() *K8sAcdSpec { + if in == nil { + return nil + } + out := new(K8sAcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K8sAdbSpec) DeepCopyInto(out *K8sAdbSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAdbSpec. +func (in *K8sAdbSpec) DeepCopy() *K8sAdbSpec { + if in == nil { + return nil + } + out := new(K8sAdbSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sSecretSpec) DeepCopyInto(out *K8sSecretSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sSecretSpec. +func (in *K8sSecretSpec) DeepCopy() *K8sSecretSpec { + if in == nil { + return nil + } + out := new(K8sSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfig. +func (in *KMSConfig) DeepCopy() *KMSConfig { + if in == nil { + return nil + } + out := new(KMSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSDetailsStatus) DeepCopyInto(out *KMSDetailsStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSDetailsStatus. +func (in *KMSDetailsStatus) DeepCopy() *KMSDetailsStatus { + if in == nil { + return nil + } + out := new(KMSDetailsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LREST) DeepCopyInto(out *LREST) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LREST. +func (in *LREST) DeepCopy() *LREST { + if in == nil { + return nil + } + out := new(LREST) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LREST) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTAdminPassword) DeepCopyInto(out *LRESTAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTAdminPassword. +func (in *LRESTAdminPassword) DeepCopy() *LRESTAdminPassword { + if in == nil { + return nil + } + out := new(LRESTAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTAdminUser) DeepCopyInto(out *LRESTAdminUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTAdminUser. +func (in *LRESTAdminUser) DeepCopy() *LRESTAdminUser { + if in == nil { + return nil + } + out := new(LRESTAdminUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRESTList) DeepCopyInto(out *LRESTList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LREST, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTList. +func (in *LRESTList) DeepCopy() *LRESTList { + if in == nil { + return nil + } + out := new(LRESTList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LRESTList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTPRVKEY) DeepCopyInto(out *LRESTPRVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTPRVKEY. +func (in *LRESTPRVKEY) DeepCopy() *LRESTPRVKEY { + if in == nil { + return nil + } + out := new(LRESTPRVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTPUBKEY) DeepCopyInto(out *LRESTPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTPUBKEY. +func (in *LRESTPUBKEY) DeepCopy() *LRESTPUBKEY { + if in == nil { + return nil + } + out := new(LRESTPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRESTPassword) DeepCopyInto(out *LRESTPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTPassword. +func (in *LRESTPassword) DeepCopy() *LRESTPassword { + if in == nil { + return nil + } + out := new(LRESTPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTSecret) DeepCopyInto(out *LRESTSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTSecret. +func (in *LRESTSecret) DeepCopy() *LRESTSecret { + if in == nil { + return nil + } + out := new(LRESTSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTSpec) DeepCopyInto(out *LRESTSpec) { + *out = *in + out.SysAdminPwd = in.SysAdminPwd + out.LRESTAdminUser = in.LRESTAdminUser + out.LRESTAdminPwd = in.LRESTAdminPwd + out.LRESTTlsKey = in.LRESTTlsKey + out.LRESTTlsCrt = in.LRESTTlsCrt + out.LRESTPubKey = in.LRESTPubKey + out.LRESTPriKey = in.LRESTPriKey + out.LRESTPwd = in.LRESTPwd + out.WebLrestServerUser = in.WebLrestServerUser + out.WebLrestServerPwd = in.WebLrestServerPwd + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTSpec. +func (in *LRESTSpec) DeepCopy() *LRESTSpec { + if in == nil { + return nil + } + out := new(LRESTSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRESTStatus) DeepCopyInto(out *LRESTStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTStatus. +func (in *LRESTStatus) DeepCopy() *LRESTStatus { + if in == nil { + return nil + } + out := new(LRESTStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTSysAdminPassword) DeepCopyInto(out *LRESTSysAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTSysAdminPassword. +func (in *LRESTSysAdminPassword) DeepCopy() *LRESTSysAdminPassword { + if in == nil { + return nil + } + out := new(LRESTSysAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTTLSCRT) DeepCopyInto(out *LRESTTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTTLSCRT. +func (in *LRESTTLSCRT) DeepCopy() *LRESTTLSCRT { + if in == nil { + return nil + } + out := new(LRESTTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTTLSKEY) DeepCopyInto(out *LRESTTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTTLSKEY. +func (in *LRESTTLSKEY) DeepCopy() *LRESTTLSKEY { + if in == nil { + return nil + } + out := new(LRESTTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDB) DeepCopyInto(out *LRPDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDB. +func (in *LRPDB) DeepCopy() *LRPDB { + if in == nil { + return nil + } + out := new(LRPDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LRPDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBAdminName) DeepCopyInto(out *LRPDBAdminName) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBAdminName. +func (in *LRPDBAdminName) DeepCopy() *LRPDBAdminName { + if in == nil { + return nil + } + out := new(LRPDBAdminName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBAdminPassword) DeepCopyInto(out *LRPDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBAdminPassword. +func (in *LRPDBAdminPassword) DeepCopy() *LRPDBAdminPassword { + if in == nil { + return nil + } + out := new(LRPDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDBList) DeepCopyInto(out *LRPDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LRPDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBList. +func (in *LRPDBList) DeepCopy() *LRPDBList { + if in == nil { + return nil + } + out := new(LRPDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LRPDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBPRVKEY) DeepCopyInto(out *LRPDBPRVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBPRVKEY. +func (in *LRPDBPRVKEY) DeepCopy() *LRPDBPRVKEY { + if in == nil { + return nil + } + out := new(LRPDBPRVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBSecret) DeepCopyInto(out *LRPDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBSecret. +func (in *LRPDBSecret) DeepCopy() *LRPDBSecret { + if in == nil { + return nil + } + out := new(LRPDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDBSpec) DeepCopyInto(out *LRPDBSpec) { + *out = *in + out.LRPDBTlsKey = in.LRPDBTlsKey + out.LRPDBTlsCrt = in.LRPDBTlsCrt + out.LRPDBTlsCat = in.LRPDBTlsCat + out.LRPDBPriKey = in.LRPDBPriKey + out.AdminName = in.AdminName + out.AdminPwd = in.AdminPwd + out.AdminpdbUser = in.AdminpdbUser + out.AdminpdbPass = in.AdminpdbPass + if in.ReuseTempFile != nil { + in, out := &in.ReuseTempFile, &out.ReuseTempFile + *out = new(bool) + **out = **in + } + if in.UnlimitedStorage != nil { + in, out := &in.UnlimitedStorage, &out.UnlimitedStorage + *out = new(bool) + **out = **in + } + if in.AsClone != nil { + in, out := &in.AsClone, &out.AsClone + *out = new(bool) + **out = **in + } + out.WebLrpdbServerUser = in.WebLrpdbServerUser + out.WebLrpdbServerPwd = in.WebLrpdbServerPwd + if in.LTDEImport != nil { + in, out := &in.LTDEImport, &out.LTDEImport + *out = new(bool) + **out = **in + } + if in.LTDEExport != nil { + in, out := &in.LTDEExport, &out.LTDEExport + *out = new(bool) + **out = **in + } + out.LTDEPassword = in.LTDEPassword + out.LTDESecret = in.LTDESecret + if in.GetScript != nil { + in, out := &in.GetScript, &out.GetScript + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBSpec. +func (in *LRPDBSpec) DeepCopy() *LRPDBSpec { + if in == nil { + return nil + } + out := new(LRPDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBStatus) DeepCopyInto(out *LRPDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBStatus. +func (in *LRPDBStatus) DeepCopy() *LRPDBStatus { + if in == nil { + return nil + } + out := new(LRPDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDBTLSCAT) DeepCopyInto(out *LRPDBTLSCAT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBTLSCAT. +func (in *LRPDBTLSCAT) DeepCopy() *LRPDBTLSCAT { + if in == nil { + return nil + } + out := new(LRPDBTLSCAT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBTLSCRT) DeepCopyInto(out *LRPDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBTLSCRT. +func (in *LRPDBTLSCRT) DeepCopy() *LRPDBTLSCRT { + if in == nil { + return nil + } + out := new(LRPDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBTLSKEY) DeepCopyInto(out *LRPDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBTLSKEY. +func (in *LRPDBTLSKEY) DeepCopy() *LRPDBTLSKEY { + if in == nil { + return nil + } + out := new(LRPDBTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LTDEPwd) DeepCopyInto(out *LTDEPwd) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LTDEPwd. +func (in *LTDEPwd) DeepCopy() *LTDEPwd { + if in == nil { + return nil + } + out := new(LTDEPwd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LTDESecret) DeepCopyInto(out *LTDESecret) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LTDESecret. +func (in *LTDESecret) DeepCopy() *LTDESecret { + if in == nil { + return nil + } + out := new(LTDESecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ORDSPassword) DeepCopyInto(out *ORDSPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ORDSPassword. +func (in *ORDSPassword) DeepCopy() *ORDSPassword { + if in == nil { + return nil + } + out := new(ORDSPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciAcdSpec) DeepCopyInto(out *OciAcdSpec) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAcdSpec. +func (in *OciAcdSpec) DeepCopy() *OciAcdSpec { + if in == nil { + return nil + } + out := new(OciAcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciAdbSpec) DeepCopyInto(out *OciAdbSpec) { + *out = *in + if in.OCID != nil { + in, out := &in.OCID, &out.OCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAdbSpec. +func (in *OciAdbSpec) DeepCopy() *OciAdbSpec { + if in == nil { + return nil + } + out := new(OciAdbSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OciConfigSpec) DeepCopyInto(out *OciConfigSpec) { + *out = *in + if in.ConfigMapName != nil { + in, out := &in.ConfigMapName, &out.ConfigMapName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciConfigSpec. +func (in *OciConfigSpec) DeepCopy() *OciConfigSpec { + if in == nil { + return nil + } + out := new(OciConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciSecretSpec) DeepCopyInto(out *OciSecretSpec) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciSecretSpec. +func (in *OciSecretSpec) DeepCopy() *OciSecretSpec { + if in == nil { + return nil + } + out := new(OciSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataService) DeepCopyInto(out *OracleRestDataService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataService. +func (in *OracleRestDataService) DeepCopy() *OracleRestDataService { + if in == nil { + return nil + } + out := new(OracleRestDataService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OracleRestDataService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceImage) DeepCopyInto(out *OracleRestDataServiceImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceImage. +func (in *OracleRestDataServiceImage) DeepCopy() *OracleRestDataServiceImage { + if in == nil { + return nil + } + out := new(OracleRestDataServiceImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceList) DeepCopyInto(out *OracleRestDataServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OracleRestDataService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceList. +func (in *OracleRestDataServiceList) DeepCopy() *OracleRestDataServiceList { + if in == nil { + return nil + } + out := new(OracleRestDataServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OracleRestDataServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OracleRestDataServicePassword) DeepCopyInto(out *OracleRestDataServicePassword) { + *out = *in + if in.KeepSecret != nil { + in, out := &in.KeepSecret, &out.KeepSecret + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServicePassword. +func (in *OracleRestDataServicePassword) DeepCopy() *OracleRestDataServicePassword { + if in == nil { + return nil + } + out := new(OracleRestDataServicePassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServicePersistence) DeepCopyInto(out *OracleRestDataServicePersistence) { + *out = *in + if in.SetWritePermissions != nil { + in, out := &in.SetWritePermissions, &out.SetWritePermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServicePersistence. +func (in *OracleRestDataServicePersistence) DeepCopy() *OracleRestDataServicePersistence { + if in == nil { + return nil + } + out := new(OracleRestDataServicePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceRestEnableSchemas) DeepCopyInto(out *OracleRestDataServiceRestEnableSchemas) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceRestEnableSchemas. +func (in *OracleRestDataServiceRestEnableSchemas) DeepCopy() *OracleRestDataServiceRestEnableSchemas { + if in == nil { + return nil + } + out := new(OracleRestDataServiceRestEnableSchemas) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OracleRestDataServiceSpec) DeepCopyInto(out *OracleRestDataServiceSpec) { + *out = *in + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Image = in.Image + in.OrdsPassword.DeepCopyInto(&out.OrdsPassword) + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + if in.RestEnableSchemas != nil { + in, out := &in.RestEnableSchemas, &out.RestEnableSchemas + *out = make([]OracleRestDataServiceRestEnableSchemas, len(*in)) + copy(*out, *in) + } + in.Persistence.DeepCopyInto(&out.Persistence) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceSpec. +func (in *OracleRestDataServiceSpec) DeepCopy() *OracleRestDataServiceSpec { + if in == nil { + return nil + } + out := new(OracleRestDataServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceStatus) DeepCopyInto(out *OracleRestDataServiceStatus) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceStatus. +func (in *OracleRestDataServiceStatus) DeepCopy() *OracleRestDataServiceStatus { + if in == nil { + return nil + } + out := new(OracleRestDataServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrdsSrvs) DeepCopyInto(out *OrdsSrvs) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvs. +func (in *OrdsSrvs) DeepCopy() *OrdsSrvs { + if in == nil { + return nil + } + out := new(OrdsSrvs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrdsSrvs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdsSrvsList) DeepCopyInto(out *OrdsSrvsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OrdsSrvs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvsList. +func (in *OrdsSrvsList) DeepCopy() *OrdsSrvsList { + if in == nil { + return nil + } + out := new(OrdsSrvsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrdsSrvsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrdsSrvsSpec) DeepCopyInto(out *OrdsSrvsSpec) { + *out = *in + in.GlobalSettings.DeepCopyInto(&out.GlobalSettings) + out.EncPrivKey = in.EncPrivKey + if in.PoolSettings != nil { + in, out := &in.PoolSettings, &out.PoolSettings + *out = make([]*PoolSettings, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PoolSettings) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvsSpec. +func (in *OrdsSrvsSpec) DeepCopy() *OrdsSrvsSpec { + if in == nil { + return nil + } + out := new(OrdsSrvsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdsSrvsStatus) DeepCopyInto(out *OrdsSrvsStatus) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(int32) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvsStatus. +func (in *OrdsSrvsStatus) DeepCopy() *OrdsSrvsStatus { + if in == nil { + return nil + } + out := new(OrdsSrvsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDB) DeepCopyInto(out *PDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDB. 
+func (in *PDB) DeepCopy() *PDB { + if in == nil { + return nil + } + out := new(PDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBAdminName) DeepCopyInto(out *PDBAdminName) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminName. +func (in *PDBAdminName) DeepCopy() *PDBAdminName { + if in == nil { + return nil + } + out := new(PDBAdminName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBAdminPassword) DeepCopyInto(out *PDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminPassword. +func (in *PDBAdminPassword) DeepCopy() *PDBAdminPassword { + if in == nil { + return nil + } + out := new(PDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBConfig) DeepCopyInto(out *PDBConfig) { + *out = *in + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName + *out = new(string) + **out = **in + } + if in.PdbAdminPassword != nil { + in, out := &in.PdbAdminPassword, &out.PdbAdminPassword + *out = new(string) + **out = **in + } + if in.TdeWalletPassword != nil { + in, out := &in.TdeWalletPassword, &out.TdeWalletPassword + *out = new(string) + **out = **in + } + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked + *out = new(bool) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IsDelete != nil { + in, out := &in.IsDelete, &out.IsDelete + *out = new(bool) + **out = **in + } + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBConfig. +func (in *PDBConfig) DeepCopy() *PDBConfig { + if in == nil { + return nil + } + out := new(PDBConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBConfigStatus) DeepCopyInto(out *PDBConfigStatus) { + *out = *in + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName + *out = new(string) + **out = **in + } + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked + *out = new(bool) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBConfigStatus. +func (in *PDBConfigStatus) DeepCopy() *PDBConfigStatus { + if in == nil { + return nil + } + out := new(PDBConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBDetailsStatus) DeepCopyInto(out *PDBDetailsStatus) { + *out = *in + if in.PDBConfigStatus != nil { + in, out := &in.PDBConfigStatus, &out.PDBConfigStatus + *out = make([]PDBConfigStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBDetailsStatus. +func (in *PDBDetailsStatus) DeepCopy() *PDBDetailsStatus { + if in == nil { + return nil + } + out := new(PDBDetailsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBList) DeepCopyInto(out *PDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBList. +func (in *PDBList) DeepCopy() *PDBList { + if in == nil { + return nil + } + out := new(PDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBPRIVKEY) DeepCopyInto(out *PDBPRIVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPRIVKEY. +func (in *PDBPRIVKEY) DeepCopy() *PDBPRIVKEY { + if in == nil { + return nil + } + out := new(PDBPRIVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBPUBKEY) DeepCopyInto(out *PDBPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPUBKEY. +func (in *PDBPUBKEY) DeepCopy() *PDBPUBKEY { + if in == nil { + return nil + } + out := new(PDBPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBSecret) DeepCopyInto(out *PDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSecret. 
+func (in *PDBSecret) DeepCopy() *PDBSecret { + if in == nil { + return nil + } + out := new(PDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBSpec) DeepCopyInto(out *PDBSpec) { + *out = *in + out.PDBTlsKey = in.PDBTlsKey + out.PDBTlsCrt = in.PDBTlsCrt + out.PDBTlsCat = in.PDBTlsCat + out.AdminName = in.AdminName + out.AdminPwd = in.AdminPwd + out.WebServerUsr = in.WebServerUsr + out.WebServerPwd = in.WebServerPwd + if in.ReuseTempFile != nil { + in, out := &in.ReuseTempFile, &out.ReuseTempFile + *out = new(bool) + **out = **in + } + if in.UnlimitedStorage != nil { + in, out := &in.UnlimitedStorage, &out.UnlimitedStorage + *out = new(bool) + **out = **in + } + if in.AsClone != nil { + in, out := &in.AsClone, &out.AsClone + *out = new(bool) + **out = **in + } + if in.TDEImport != nil { + in, out := &in.TDEImport, &out.TDEImport + *out = new(bool) + **out = **in + } + if in.TDEExport != nil { + in, out := &in.TDEExport, &out.TDEExport + *out = new(bool) + **out = **in + } + out.TDEPassword = in.TDEPassword + out.TDESecret = in.TDESecret + if in.GetScript != nil { + in, out := &in.GetScript, &out.GetScript + *out = new(bool) + **out = **in + } + out.PDBPubKey = in.PDBPubKey + out.PDBPriKey = in.PDBPriKey +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSpec. +func (in *PDBSpec) DeepCopy() *PDBSpec { + if in == nil { + return nil + } + out := new(PDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBStatus) DeepCopyInto(out *PDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBStatus. 
+func (in *PDBStatus) DeepCopy() *PDBStatus { + if in == nil { + return nil + } + out := new(PDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSCAT) DeepCopyInto(out *PDBTLSCAT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCAT. +func (in *PDBTLSCAT) DeepCopy() *PDBTLSCAT { + if in == nil { + return nil + } + out := new(PDBTLSCAT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSCRT) DeepCopyInto(out *PDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCRT. +func (in *PDBTLSCRT) DeepCopy() *PDBTLSCRT { + if in == nil { + return nil + } + out := new(PDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSKEY) DeepCopyInto(out *PDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSKEY. +func (in *PDBTLSKEY) DeepCopy() *PDBTLSKEY { + if in == nil { + return nil + } + out := new(PDBTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PITSpec) DeepCopyInto(out *PITSpec) { + *out = *in + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PITSpec. 
+func (in *PITSpec) DeepCopy() *PITSpec { + if in == nil { + return nil + } + out := new(PITSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PasswordSecret) DeepCopyInto(out *PasswordSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSecret. +func (in *PasswordSecret) DeepCopy() *PasswordSecret { + if in == nil { + return nil + } + out := new(PasswordSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PasswordSpec) DeepCopyInto(out *PasswordSpec) { + *out = *in + in.K8sSecret.DeepCopyInto(&out.K8sSecret) + in.OciSecret.DeepCopyInto(&out.OciSecret) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSpec. +func (in *PasswordSpec) DeepCopy() *PasswordSpec { + if in == nil { + return nil + } + out := new(PasswordSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PoolSettings) DeepCopyInto(out *PoolSettings) { + *out = *in + out.DBSecret = in.DBSecret + out.DBAdminUserSecret = in.DBAdminUserSecret + out.DBCDBAdminUserSecret = in.DBCDBAdminUserSecret + if in.DBPoolDestroyTimeout != nil { + in, out := &in.DBPoolDestroyTimeout, &out.DBPoolDestroyTimeout + *out = new(timex.Duration) + **out = **in + } + if in.DebugTrackResources != nil { + in, out := &in.DebugTrackResources, &out.DebugTrackResources + *out = new(bool) + **out = **in + } + if in.FeatureOpenservicebrokerExclude != nil { + in, out := &in.FeatureOpenservicebrokerExclude, &out.FeatureOpenservicebrokerExclude + *out = new(bool) + **out = **in + } + if in.FeatureSDW != nil { + in, out := &in.FeatureSDW, &out.FeatureSDW + *out = new(bool) + **out = **in + } + if in.OwaTraceSql != nil { + in, out := &in.OwaTraceSql, &out.OwaTraceSql + *out = new(bool) + **out = **in + } + if in.SecurityJWTProfileEnabled != nil { + in, out := &in.SecurityJWTProfileEnabled, &out.SecurityJWTProfileEnabled + *out = new(bool) + **out = **in + } + if in.SecurityJWKSSize != nil { + in, out := &in.SecurityJWKSSize, &out.SecurityJWKSSize + *out = new(int32) + **out = **in + } + if in.SecurityJWKSConnectionTimeout != nil { + in, out := &in.SecurityJWKSConnectionTimeout, &out.SecurityJWKSConnectionTimeout + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWKSReadTimeout != nil { + in, out := &in.SecurityJWKSReadTimeout, &out.SecurityJWKSReadTimeout + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWKSRefreshInterval != nil { + in, out := &in.SecurityJWKSRefreshInterval, &out.SecurityJWKSRefreshInterval + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWTAllowedSkew != nil { + in, out := &in.SecurityJWTAllowedSkew, &out.SecurityJWTAllowedSkew + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWTAllowedAge != nil { + in, out := &in.SecurityJWTAllowedAge, &out.SecurityJWTAllowedAge + *out = new(timex.Duration) + **out = **in + } + 
if in.DBPort != nil { + in, out := &in.DBPort, &out.DBPort + *out = new(int32) + **out = **in + } + if in.JDBCInactivityTimeout != nil { + in, out := &in.JDBCInactivityTimeout, &out.JDBCInactivityTimeout + *out = new(int32) + **out = **in + } + if in.JDBCInitialLimit != nil { + in, out := &in.JDBCInitialLimit, &out.JDBCInitialLimit + *out = new(int32) + **out = **in + } + if in.JDBCMaxConnectionReuseCount != nil { + in, out := &in.JDBCMaxConnectionReuseCount, &out.JDBCMaxConnectionReuseCount + *out = new(int32) + **out = **in + } + if in.JDBCMaxConnectionReuseTime != nil { + in, out := &in.JDBCMaxConnectionReuseTime, &out.JDBCMaxConnectionReuseTime + *out = new(int32) + **out = **in + } + if in.JDBCSecondsToTrustIdleConnection != nil { + in, out := &in.JDBCSecondsToTrustIdleConnection, &out.JDBCSecondsToTrustIdleConnection + *out = new(int32) + **out = **in + } + if in.JDBCMaxLimit != nil { + in, out := &in.JDBCMaxLimit, &out.JDBCMaxLimit + *out = new(int32) + **out = **in + } + if in.JDBCAuthEnabled != nil { + in, out := &in.JDBCAuthEnabled, &out.JDBCAuthEnabled + *out = new(bool) + **out = **in + } + if in.JDBCMaxStatementsLimit != nil { + in, out := &in.JDBCMaxStatementsLimit, &out.JDBCMaxStatementsLimit + *out = new(int32) + **out = **in + } + if in.JDBCMinLimit != nil { + in, out := &in.JDBCMinLimit, &out.JDBCMinLimit + *out = new(int32) + **out = **in + } + if in.JDBCStatementTimeout != nil { + in, out := &in.JDBCStatementTimeout, &out.JDBCStatementTimeout + *out = new(int32) + **out = **in + } + if in.MiscPaginationMaxRows != nil { + in, out := &in.MiscPaginationMaxRows, &out.MiscPaginationMaxRows + *out = new(int32) + **out = **in + } + if in.RestEnabledSqlActive != nil { + in, out := &in.RestEnabledSqlActive, &out.RestEnabledSqlActive + *out = new(bool) + **out = **in + } + if in.DBWalletSecret != nil { + in, out := &in.DBWalletSecret, &out.DBWalletSecret + *out = new(DBWalletSecret) + **out = **in + } + if in.TNSAdminSecret != nil { + in, out := 
&in.TNSAdminSecret, &out.TNSAdminSecret + *out = new(TNSAdminSecret) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolSettings. +func (in *PoolSettings) DeepCopy() *PoolSettings { + if in == nil { + return nil + } + out := new(PoolSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. +func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriVKey) DeepCopyInto(out *PriVKey) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriVKey. +func (in *PriVKey) DeepCopy() *PriVKey { + if in == nil { + return nil + } + out := new(PriVKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretDetails) DeepCopyInto(out *SecretDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretDetails. +func (in *SecretDetails) DeepCopy() *SecretDetails { + if in == nil { + return nil + } + out := new(SecretDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardSpec) DeepCopyInto(out *ShardSpec) { + *out = *in + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make([]EnvironmentVariable, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvMatchLabels != nil { + in, out := &in.PvMatchLabels, &out.PvMatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ImagePulllPolicy != nil { + in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy + *out = new(corev1.PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardSpec. +func (in *ShardSpec) DeepCopy() *ShardSpec { + if in == nil { + return nil + } + out := new(ShardSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardingDatabase) DeepCopyInto(out *ShardingDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabase. 
+func (in *ShardingDatabase) DeepCopy() *ShardingDatabase { + if in == nil { + return nil + } + out := new(ShardingDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShardingDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardingDatabaseList) DeepCopyInto(out *ShardingDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ShardingDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabaseList. +func (in *ShardingDatabaseList) DeepCopy() *ShardingDatabaseList { + if in == nil { + return nil + } + out := new(ShardingDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShardingDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardingDatabaseSpec) DeepCopyInto(out *ShardingDatabaseSpec) { + *out = *in + if in.Shard != nil { + in, out := &in.Shard, &out.Shard + *out = make([]ShardSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Catalog != nil { + in, out := &in.Catalog, &out.Catalog + *out = make([]CatalogSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Gsm != nil { + in, out := &in.Gsm, &out.Gsm + *out = make([]GsmSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PortMappings != nil { + in, out := &in.PortMappings, &out.PortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.GsmShardSpace != nil { + in, out := &in.GsmShardSpace, &out.GsmShardSpace + *out = make([]GsmShardSpaceSpec, len(*in)) + copy(*out, *in) + } + if in.GsmShardGroup != nil { + in, out := &in.GsmShardGroup, &out.GsmShardGroup + *out = make([]GsmShardGroupSpec, len(*in)) + copy(*out, *in) + } + if in.ShardRegion != nil { + in, out := &in.ShardRegion, &out.ShardRegion + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GsmService != nil { + in, out := &in.GsmService, &out.GsmService + *out = make([]GsmServiceSpec, len(*in)) + copy(*out, *in) + } + if in.DbSecret != nil { + in, out := &in.DbSecret, &out.DbSecret + *out = new(SecretDetails) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabaseSpec. +func (in *ShardingDatabaseSpec) DeepCopy() *ShardingDatabaseSpec { + if in == nil { + return nil + } + out := new(ShardingDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardingDatabaseStatus) DeepCopyInto(out *ShardingDatabaseStatus) { + *out = *in + if in.Shard != nil { + in, out := &in.Shard, &out.Shard + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Catalog != nil { + in, out := &in.Catalog, &out.Catalog + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Gsm.DeepCopyInto(&out.Gsm) + if in.CrdStatus != nil { + in, out := &in.CrdStatus, &out.CrdStatus + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabaseStatus. +func (in *ShardingDatabaseStatus) DeepCopy() *ShardingDatabaseStatus { + if in == nil { + return nil + } + out := new(ShardingDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabase) DeepCopyInto(out *SingleInstanceDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabase. +func (in *SingleInstanceDatabase) DeepCopy() *SingleInstanceDatabase { + if in == nil { + return nil + } + out := new(SingleInstanceDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SingleInstanceDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseAdminPassword) DeepCopyInto(out *SingleInstanceDatabaseAdminPassword) { + *out = *in + if in.KeepSecret != nil { + in, out := &in.KeepSecret, &out.KeepSecret + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseAdminPassword. +func (in *SingleInstanceDatabaseAdminPassword) DeepCopy() *SingleInstanceDatabaseAdminPassword { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseImage) DeepCopyInto(out *SingleInstanceDatabaseImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseImage. +func (in *SingleInstanceDatabaseImage) DeepCopy() *SingleInstanceDatabaseImage { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseInitParams) DeepCopyInto(out *SingleInstanceDatabaseInitParams) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseInitParams. +func (in *SingleInstanceDatabaseInitParams) DeepCopy() *SingleInstanceDatabaseInitParams { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseInitParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseList) DeepCopyInto(out *SingleInstanceDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SingleInstanceDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseList. +func (in *SingleInstanceDatabaseList) DeepCopy() *SingleInstanceDatabaseList { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SingleInstanceDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabasePersistence) DeepCopyInto(out *SingleInstanceDatabasePersistence) { + *out = *in + if in.SetWritePermissions != nil { + in, out := &in.SetWritePermissions, &out.SetWritePermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabasePersistence. +func (in *SingleInstanceDatabasePersistence) DeepCopy() *SingleInstanceDatabasePersistence { + if in == nil { + return nil + } + out := new(SingleInstanceDatabasePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseResource) DeepCopyInto(out *SingleInstanceDatabaseResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResource. 
+func (in *SingleInstanceDatabaseResource) DeepCopy() *SingleInstanceDatabaseResource { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseResources) DeepCopyInto(out *SingleInstanceDatabaseResources) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(SingleInstanceDatabaseResource) + **out = **in + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(SingleInstanceDatabaseResource) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResources. +func (in *SingleInstanceDatabaseResources) DeepCopy() *SingleInstanceDatabaseResources { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseSpec) DeepCopyInto(out *SingleInstanceDatabaseSpec) { + *out = *in + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.FlashBack != nil { + in, out := &in.FlashBack, &out.FlashBack + *out = new(bool) + **out = **in + } + if in.ArchiveLog != nil { + in, out := &in.ArchiveLog, &out.ArchiveLog + *out = new(bool) + **out = **in + } + if in.ForceLogging != nil { + in, out := &in.ForceLogging, &out.ForceLogging + *out = new(bool) + **out = **in + } + if in.TrueCacheServices != nil { + in, out := &in.TrueCacheServices, &out.TrueCacheServices + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + out.Image = in.Image + in.Persistence.DeepCopyInto(&out.Persistence) + if in.InitParams != nil { + in, out := &in.InitParams, &out.InitParams + *out = new(SingleInstanceDatabaseInitParams) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseSpec. +func (in *SingleInstanceDatabaseSpec) DeepCopy() *SingleInstanceDatabaseSpec { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseStatus) DeepCopyInto(out *SingleInstanceDatabaseStatus) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DgBroker != nil { + in, out := &in.DgBroker, &out.DgBroker + *out = new(string) + **out = **in + } + if in.StandbyDatabases != nil { + in, out := &in.StandbyDatabases, &out.StandbyDatabases + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.InitParams = in.InitParams + in.Persistence.DeepCopyInto(&out.Persistence) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseStatus. +func (in *SingleInstanceDatabaseStatus) DeepCopy() *SingleInstanceDatabaseStatus { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSpec) DeepCopyInto(out *SourceSpec) { + *out = *in + in.K8sAdbBackup.DeepCopyInto(&out.K8sAdbBackup) + in.PointInTime.DeepCopyInto(&out.PointInTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec. +func (in *SourceSpec) DeepCopy() *SourceSpec { + if in == nil { + return nil + } + out := new(SourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TDEPwd) DeepCopyInto(out *TDEPwd) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDEPwd. 
+func (in *TDEPwd) DeepCopy() *TDEPwd { + if in == nil { + return nil + } + out := new(TDEPwd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TDESecret) DeepCopyInto(out *TDESecret) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDESecret. +func (in *TDESecret) DeepCopy() *TDESecret { + if in == nil { + return nil + } + out := new(TDESecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TNSAdminSecret) DeepCopyInto(out *TNSAdminSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TNSAdminSecret. +func (in *TNSAdminSecret) DeepCopy() *TNSAdminSecret { + if in == nil { + return nil + } + out := new(TNSAdminSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { + *out = *in + in.K8sAdb.DeepCopyInto(&out.K8sAdb) + in.OciAdb.DeepCopyInto(&out.OciAdb) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSpec. +func (in *TargetSpec) DeepCopy() *TargetSpec { + if in == nil { + return nil + } + out := new(TargetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VmNetworkDetails) DeepCopyInto(out *VmNetworkDetails) { + *out = *in + if in.VcnName != nil { + in, out := &in.VcnName, &out.VcnName + *out = new(string) + **out = **in + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.ScanDnsName != nil { + in, out := &in.ScanDnsName, &out.ScanDnsName + *out = new(string) + **out = **in + } + if in.ListenerPort != nil { + in, out := &in.ListenerPort, &out.ListenerPort + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VmNetworkDetails. +func (in *VmNetworkDetails) DeepCopy() *VmNetworkDetails { + if in == nil { + return nil + } + out := new(VmNetworkDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WalletSpec) DeepCopyInto(out *WalletSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + in.Password.DeepCopyInto(&out.Password) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WalletSpec. +func (in *WalletSpec) DeepCopy() *WalletSpec { + if in == nil { + return nil + } + out := new(WalletSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrestServerPassword) DeepCopyInto(out *WebLrestServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrestServerPassword. 
+func (in *WebLrestServerPassword) DeepCopy() *WebLrestServerPassword { + if in == nil { + return nil + } + out := new(WebLrestServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrestServerUser) DeepCopyInto(out *WebLrestServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrestServerUser. +func (in *WebLrestServerUser) DeepCopy() *WebLrestServerUser { + if in == nil { + return nil + } + out := new(WebLrestServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrpdbServerPassword) DeepCopyInto(out *WebLrpdbServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrpdbServerPassword. +func (in *WebLrpdbServerPassword) DeepCopy() *WebLrpdbServerPassword { + if in == nil { + return nil + } + out := new(WebLrpdbServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrpdbServerUser) DeepCopyInto(out *WebLrpdbServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrpdbServerUser. +func (in *WebLrpdbServerUser) DeepCopy() *WebLrpdbServerUser { + if in == nil { + return nil + } + out := new(WebLrpdbServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebServerPassword) DeepCopyInto(out *WebServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPassword. +func (in *WebServerPassword) DeepCopy() *WebServerPassword { + if in == nil { + return nil + } + out := new(WebServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerPasswordPDB) DeepCopyInto(out *WebServerPasswordPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPasswordPDB. +func (in *WebServerPasswordPDB) DeepCopy() *WebServerPasswordPDB { + if in == nil { + return nil + } + out := new(WebServerPasswordPDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUser) DeepCopyInto(out *WebServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUser. +func (in *WebServerUser) DeepCopy() *WebServerUser { + if in == nil { + return nil + } + out := new(WebServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUserPDB) DeepCopyInto(out *WebServerUserPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUserPDB. 
+func (in *WebServerUserPDB) DeepCopy() *WebServerUserPDB { + if in == nil { + return nil + } + out := new(WebServerUserPDB) + in.DeepCopyInto(out) + return out +} diff --git a/apis/observability/v1/databaseobserver_types.go b/apis/observability/v1/databaseobserver_types.go new file mode 100644 index 00000000..642ff18b --- /dev/null +++ b/apis/observability/v1/databaseobserver_types.go @@ -0,0 +1,195 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1 + +import ( + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type StatusEnum string + +// DatabaseObserverSpec defines the desired state of DatabaseObserver +type DatabaseObserverSpec struct { + Database DatabaseObserverDatabase `json:"database,omitempty"` + Exporter DatabaseObserverExporterConfig `json:"exporter,omitempty"` + ExporterConfig DatabaseObserverConfigMap `json:"configuration,omitempty"` + Prometheus PrometheusConfig `json:"prometheus,omitempty"` + OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Log LogConfig `json:"log,omitempty"` + InheritLabels []string `json:"inheritLabels,omitempty"` + ExporterSidecars []corev1.Container `json:"sidecars,omitempty"` + SideCarVolumes []corev1.Volume `json:"sidecarVolumes,omitempty"` +} + +// LogConfig defines the configuration details relation to the logs of DatabaseObserver +type LogConfig struct { + Path string `json:"path,omitempty"` + Filename string `json:"filename,omitempty"` + Volume LogVolume `json:"volume,omitempty"` +} + +type LogVolume struct { + Name string `json:"name,omitempty"` + PersistentVolumeClaim LogVolumePVClaim `json:"persistentVolumeClaim,omitempty"` +} + +type LogVolumePVClaim struct { + ClaimName string `json:"claimName,omitempty"` +} + +// DatabaseObserverDatabase defines the database details used for DatabaseObserver 
+type DatabaseObserverDatabase struct { + DBUser DBSecret `json:"dbUser,omitempty"` + DBPassword DBSecretWithVault `json:"dbPassword,omitempty"` + DBWallet DBSecret `json:"dbWallet,omitempty"` + DBConnectionString DBSecret `json:"dbConnectionString,omitempty"` +} + +// DatabaseObserverExporterConfig defines the configuration details related to the exporters of DatabaseObserver +type DatabaseObserverExporterConfig struct { + Deployment DatabaseObserverDeployment `json:"deployment,omitempty"` + Service DatabaseObserverService `json:"service,omitempty"` +} + +// DatabaseObserverDeployment defines the exporter deployment component of DatabaseObserver +type DatabaseObserverDeployment struct { + ExporterImage string `json:"image,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + ExporterArgs []string `json:"args,omitempty"` + ExporterCommands []string `json:"commands,omitempty"` + ExporterEnvs map[string]string `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + DeploymentPodTemplate DeploymentPodTemplate `json:"podTemplate,omitempty"` +} + +// DeploymentPodTemplate defines the labels for the DatabaseObserver pods component of a deployment +type DeploymentPodTemplate struct { + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// DatabaseObserverService defines the exporter service component of DatabaseObserver +type DatabaseObserverService struct { + Ports []corev1.ServicePort `json:"ports,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// PrometheusConfig defines the generated resources for Prometheus +type PrometheusConfig struct { + ServiceMonitor PrometheusServiceMonitor `json:"serviceMonitor,omitempty"` +} + +// PrometheusServiceMonitor defines DatabaseObserver servicemonitor spec +type PrometheusServiceMonitor struct { + Labels map[string]string `json:"labels,omitempty"` + NamespaceSelector 
*monitorv1.NamespaceSelector `json:"namespaceSelector,omitempty"` + Endpoints []monitorv1.Endpoint `json:"endpoints,omitempty"` +} + +// DBSecret defines secrets used in reference +type DBSecret struct { + Key string `json:"key,omitempty"` + SecretName string `json:"secret,omitempty"` +} + +// DBSecretWithVault defines secrets used in reference with vault fields +type DBSecretWithVault struct { + Key string `json:"key,omitempty"` + SecretName string `json:"secret,omitempty"` + VaultOCID string `json:"vaultOCID,omitempty"` + VaultSecretName string `json:"vaultSecretName,omitempty"` +} + +// DatabaseObserverConfigMap defines configMap used for metrics configuration +type DatabaseObserverConfigMap struct { + Configmap ConfigMapDetails `json:"configMap,omitempty"` +} + +// ConfigMapDetails defines the configmap name +type ConfigMapDetails struct { + Key string `json:"key,omitempty"` + Name string `json:"name,omitempty"` +} + +// OCIConfigSpec defines the configmap name and secret name used for connecting to OCI +type OCIConfigSpec struct { + ConfigMapName string `json:"configMapName,omitempty"` + SecretName string `json:"secretName,omitempty"` +} + +// DatabaseObserverStatus defines the observed state of DatabaseObserver +type DatabaseObserverStatus struct { + Conditions []metav1.Condition `json:"conditions"` + Status string `json:"status,omitempty"` + ExporterConfig string `json:"exporterConfig"` + Version string `json:"version"` + Replicas int `json:"replicas,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="dbobserver";"dbobservers" + +// DatabaseObserver is the Schema for the databaseobservers API +// +kubebuilder:printcolumn:JSONPath=".status.exporterConfig",name="ExporterConfig",type=string +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string +// +kubebuilder:printcolumn:JSONPath=".status.version",name="Version",type=string +type DatabaseObserver struct { + 
metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DatabaseObserverSpec `json:"spec,omitempty"` + Status DatabaseObserverStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DatabaseObserverList contains a list of DatabaseObserver +type DatabaseObserverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DatabaseObserver `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DatabaseObserver{}, &DatabaseObserverList{}) +} diff --git a/apis/observability/v1/databaseobserver_webhook.go b/apis/observability/v1/databaseobserver_webhook.go new file mode 100644 index 00000000..286d6ed6 --- /dev/null +++ b/apis/observability/v1/databaseobserver_webhook.go @@ -0,0 +1,185 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** 
either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "strings" +) + +// log is for logging in this package. 
+var databaseobserverlog = logf.Log.WithName("databaseobserver-resource") + +const ( + AllowedExporterImage = "container-registry.oracle.com/database/observability-exporter" + ErrorSpecValidationMissingConnString = "a required field for database connection string secret is missing or does not have a value" + ErrorSpecValidationMissingDBUser = "a required field for database user secret is missing or does not have a value" + ErrorSpecValidationMissingDBVaultField = "a field for the OCI vault has a value but the other required field is missing or does not have a value" + ErrorSpecValidationMissingOCIConfig = "a field(s) for the OCI Config is missing or does not have a value when fields for the OCI vault has values" + ErrorSpecValidationMissingDBPasswordSecret = "a required field for the database password secret is missing or does not have a value" + ErrorSpecExporterImageNotAllowed = "a different exporter image was found, only official database exporter container images are currently supported" +) + +func (r *DatabaseObserver) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-observability-oracle-com-v1-databaseobserver,mutating=true,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,verbs=create;update,versions=v1,name=mdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DatabaseObserver{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DatabaseObserver) Default() { + databaseobserverlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:verbs=create;update,path=/validate-observability-oracle-com-v1-databaseobserver,mutating=false,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,versions=v1,name=vdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &DatabaseObserver{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateCreate() (admission.Warnings, error) { + databaseobserverlog.Info("validate create", "name", r.Name) + + var e field.ErrorList + ns := dbcommons.GetWatchNamespaces() + + // Check for namespace/cluster scope access + if _, isDesiredNamespaceWithinScope := ns[r.Namespace]; !isDesiredNamespaceWithinScope && len(ns) > 0 { + e = append(e, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + // Check required secret for db user has value + if r.Spec.Database.DBUser.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbUser").Child("secret"), r.Spec.Database.DBUser.SecretName, + ErrorSpecValidationMissingDBUser)) + } + + // Check required secret for db connection string has value + if r.Spec.Database.DBConnectionString.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbConnectionString").Child("secret"), r.Spec.Database.DBConnectionString.SecretName, + ErrorSpecValidationMissingConnString)) + } + + // The other vault field must have value if one does + if (r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName == "") || + (r.Spec.Database.DBPassword.VaultSecretName != "" && r.Spec.Database.DBPassword.VaultOCID == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword"), r.Spec.Database.DBPassword, + ErrorSpecValidationMissingDBVaultField)) + } + + // if 
vault fields have value, ociConfig must have values + if r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName != "" && + (r.Spec.OCIConfig.SecretName == "" || r.Spec.OCIConfig.ConfigMapName == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("ociConfig"), r.Spec.OCIConfig, + ErrorSpecValidationMissingOCIConfig)) + } + + // If all of {DB Password Secret Name and vaultOCID+vaultSecretName} have no value, then error out + if r.Spec.Database.DBPassword.SecretName == "" && + r.Spec.Database.DBPassword.VaultOCID == "" && + r.Spec.Database.DBPassword.VaultSecretName == "" { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword").Child("secret"), r.Spec.Database.DBPassword.SecretName, + ErrorSpecValidationMissingDBPasswordSecret)) + } + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil + +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + databaseobserverlog.Info("validate update", "name", r.Name) + var e field.ErrorList + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), 
r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateDelete() (admission.Warnings, error) { + databaseobserverlog.Info("validate delete", "name", r.Name) + + return nil, nil +} diff --git a/apis/observability/v1/groupversion_info.go b/apis/observability/v1/groupversion_info.go new file mode 100644 index 00000000..3f332c05 --- /dev/null +++ b/apis/observability/v1/groupversion_info.go @@ -0,0 +1,58 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Package v1 contains API Schema definitions for the observability v1 API group +// +kubebuilder:object:generate=true +// +groupName=observability.oracle.com +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "observability.oracle.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/observability/v1/zz_generated.deepcopy.go b/apis/observability/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..4924216f --- /dev/null +++ b/apis/observability/v1/zz_generated.deepcopy.go @@ -0,0 +1,481 @@ +//go:build !ignore_autogenerated + +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapDetails) DeepCopyInto(out *ConfigMapDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapDetails. +func (in *ConfigMapDetails) DeepCopy() *ConfigMapDetails { + if in == nil { + return nil + } + out := new(ConfigMapDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecret) DeepCopyInto(out *DBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecret. +func (in *DBSecret) DeepCopy() *DBSecret { + if in == nil { + return nil + } + out := new(DBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecretWithVault) DeepCopyInto(out *DBSecretWithVault) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecretWithVault. +func (in *DBSecretWithVault) DeepCopy() *DBSecretWithVault { + if in == nil { + return nil + } + out := new(DBSecretWithVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserver) DeepCopyInto(out *DatabaseObserver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserver. +func (in *DatabaseObserver) DeepCopy() *DatabaseObserver { + if in == nil { + return nil + } + out := new(DatabaseObserver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverConfigMap) DeepCopyInto(out *DatabaseObserverConfigMap) { + *out = *in + out.Configmap = in.Configmap +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverConfigMap. +func (in *DatabaseObserverConfigMap) DeepCopy() *DatabaseObserverConfigMap { + if in == nil { + return nil + } + out := new(DatabaseObserverConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDatabase) DeepCopyInto(out *DatabaseObserverDatabase) { + *out = *in + out.DBUser = in.DBUser + out.DBPassword = in.DBPassword + out.DBWallet = in.DBWallet + out.DBConnectionString = in.DBConnectionString +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDatabase. 
+func (in *DatabaseObserverDatabase) DeepCopy() *DatabaseObserverDatabase { + if in == nil { + return nil + } + out := new(DatabaseObserverDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDeployment) DeepCopyInto(out *DatabaseObserverDeployment) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ExporterArgs != nil { + in, out := &in.ExporterArgs, &out.ExporterArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterCommands != nil { + in, out := &in.ExporterCommands, &out.ExporterCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterEnvs != nil { + in, out := &in.ExporterEnvs, &out.ExporterEnvs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DeploymentPodTemplate.DeepCopyInto(&out.DeploymentPodTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDeployment. +func (in *DatabaseObserverDeployment) DeepCopy() *DatabaseObserverDeployment { + if in == nil { + return nil + } + out := new(DatabaseObserverDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverExporterConfig) DeepCopyInto(out *DatabaseObserverExporterConfig) { + *out = *in + in.Deployment.DeepCopyInto(&out.Deployment) + in.Service.DeepCopyInto(&out.Service) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverExporterConfig. +func (in *DatabaseObserverExporterConfig) DeepCopy() *DatabaseObserverExporterConfig { + if in == nil { + return nil + } + out := new(DatabaseObserverExporterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverList) DeepCopyInto(out *DatabaseObserverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseObserver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverList. +func (in *DatabaseObserverList) DeepCopy() *DatabaseObserverList { + if in == nil { + return nil + } + out := new(DatabaseObserverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverService) DeepCopyInto(out *DatabaseObserverService) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]corev1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverService. +func (in *DatabaseObserverService) DeepCopy() *DatabaseObserverService { + if in == nil { + return nil + } + out := new(DatabaseObserverService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverSpec) DeepCopyInto(out *DatabaseObserverSpec) { + *out = *in + out.Database = in.Database + in.Exporter.DeepCopyInto(&out.Exporter) + out.ExporterConfig = in.ExporterConfig + in.Prometheus.DeepCopyInto(&out.Prometheus) + out.OCIConfig = in.OCIConfig + out.Log = in.Log + if in.InheritLabels != nil { + in, out := &in.InheritLabels, &out.InheritLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterSidecars != nil { + in, out := &in.ExporterSidecars, &out.ExporterSidecars + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SideCarVolumes != nil { + in, out := &in.SideCarVolumes, &out.SideCarVolumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverSpec. 
+func (in *DatabaseObserverSpec) DeepCopy() *DatabaseObserverSpec { + if in == nil { + return nil + } + out := new(DatabaseObserverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverStatus) DeepCopyInto(out *DatabaseObserverStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverStatus. +func (in *DatabaseObserverStatus) DeepCopy() *DatabaseObserverStatus { + if in == nil { + return nil + } + out := new(DatabaseObserverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentPodTemplate) DeepCopyInto(out *DeploymentPodTemplate) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentPodTemplate. +func (in *DeploymentPodTemplate) DeepCopy() *DeploymentPodTemplate { + if in == nil { + return nil + } + out := new(DeploymentPodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfig) DeepCopyInto(out *LogConfig) { + *out = *in + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfig. +func (in *LogConfig) DeepCopy() *LogConfig { + if in == nil { + return nil + } + out := new(LogConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolume) DeepCopyInto(out *LogVolume) { + *out = *in + out.PersistentVolumeClaim = in.PersistentVolumeClaim +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolume. +func (in *LogVolume) DeepCopy() *LogVolume { + if in == nil { + return nil + } + out := new(LogVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolumePVClaim) DeepCopyInto(out *LogVolumePVClaim) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolumePVClaim. +func (in *LogVolumePVClaim) DeepCopy() *LogVolumePVClaim { + if in == nil { + return nil + } + out := new(LogVolumePVClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIConfigSpec. +func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { + if in == nil { + return nil + } + out := new(OCIConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { + *out = *in + in.ServiceMonitor.DeepCopyInto(&out.ServiceMonitor) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusConfig. +func (in *PrometheusConfig) DeepCopy() *PrometheusConfig { + if in == nil { + return nil + } + out := new(PrometheusConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusServiceMonitor) DeepCopyInto(out *PrometheusServiceMonitor) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(monitoringv1.NamespaceSelector) + (*in).DeepCopyInto(*out) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]monitoringv1.Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusServiceMonitor. +func (in *PrometheusServiceMonitor) DeepCopy() *PrometheusServiceMonitor { + if in == nil { + return nil + } + out := new(PrometheusServiceMonitor) + in.DeepCopyInto(out) + return out +} diff --git a/apis/observability/v1alpha1/databaseobserver_types.go b/apis/observability/v1alpha1/databaseobserver_types.go new file mode 100644 index 00000000..f4c62900 --- /dev/null +++ b/apis/observability/v1alpha1/databaseobserver_types.go @@ -0,0 +1,195 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type StatusEnum string + +// DatabaseObserverSpec defines the desired state of DatabaseObserver +type DatabaseObserverSpec struct { + Database DatabaseObserverDatabase `json:"database,omitempty"` + Exporter DatabaseObserverExporterConfig `json:"exporter,omitempty"` + ExporterConfig DatabaseObserverConfigMap `json:"configuration,omitempty"` + Prometheus PrometheusConfig `json:"prometheus,omitempty"` + OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Log LogConfig `json:"log,omitempty"` + InheritLabels []string `json:"inheritLabels,omitempty"` + ExporterSidecars []corev1.Container `json:"sidecars,omitempty"` + SideCarVolumes []corev1.Volume `json:"sidecarVolumes,omitempty"` +} + +// LogConfig defines the configuration details relation to the logs of DatabaseObserver +type LogConfig struct { + Path string `json:"path,omitempty"` + Filename string `json:"filename,omitempty"` + Volume LogVolume `json:"volume,omitempty"` +} + +type LogVolume struct { + Name string `json:"name,omitempty"` + PersistentVolumeClaim LogVolumePVClaim `json:"persistentVolumeClaim,omitempty"` +} + +type LogVolumePVClaim struct { + ClaimName string `json:"claimName,omitempty"` +} + +// DatabaseObserverDatabase defines the database details used for DatabaseObserver +type DatabaseObserverDatabase struct { + DBUser DBSecret `json:"dbUser,omitempty"` + DBPassword DBSecretWithVault `json:"dbPassword,omitempty"` + DBWallet DBSecret `json:"dbWallet,omitempty"` + DBConnectionString DBSecret `json:"dbConnectionString,omitempty"` +} + +// DatabaseObserverExporterConfig defines the configuration details related to the exporters of DatabaseObserver +type DatabaseObserverExporterConfig struct { + Deployment DatabaseObserverDeployment 
`json:"deployment,omitempty"` + Service DatabaseObserverService `json:"service,omitempty"` +} + +// DatabaseObserverDeployment defines the exporter deployment component of DatabaseObserver +type DatabaseObserverDeployment struct { + ExporterImage string `json:"image,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + ExporterArgs []string `json:"args,omitempty"` + ExporterCommands []string `json:"commands,omitempty"` + ExporterEnvs map[string]string `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + DeploymentPodTemplate DeploymentPodTemplate `json:"podTemplate,omitempty"` +} + +// DeploymentPodTemplate defines the labels for the DatabaseObserver pods component of a deployment +type DeploymentPodTemplate struct { + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// DatabaseObserverService defines the exporter service component of DatabaseObserver +type DatabaseObserverService struct { + Ports []corev1.ServicePort `json:"ports,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// PrometheusConfig defines the generated resources for Prometheus +type PrometheusConfig struct { + ServiceMonitor PrometheusServiceMonitor `json:"serviceMonitor,omitempty"` +} + +// PrometheusServiceMonitor defines DatabaseObserver servicemonitor spec +type PrometheusServiceMonitor struct { + Labels map[string]string `json:"labels,omitempty"` + NamespaceSelector *monitorv1.NamespaceSelector `json:"namespaceSelector,omitempty"` + Endpoints []monitorv1.Endpoint `json:"endpoints,omitempty"` +} + +// DBSecret defines secrets used in reference +type DBSecret struct { + Key string `json:"key,omitempty"` + SecretName string `json:"secret,omitempty"` +} + +// DBSecretWithVault defines secrets used in reference with vault fields +type DBSecretWithVault struct { + Key string `json:"key,omitempty"` + SecretName string 
`json:"secret,omitempty"` + VaultOCID string `json:"vaultOCID,omitempty"` + VaultSecretName string `json:"vaultSecretName,omitempty"` +} + +// DatabaseObserverConfigMap defines configMap used for metrics configuration +type DatabaseObserverConfigMap struct { + Configmap ConfigMapDetails `json:"configMap,omitempty"` +} + +// ConfigMapDetails defines the configmap name +type ConfigMapDetails struct { + Key string `json:"key,omitempty"` + Name string `json:"name,omitempty"` +} + +// OCIConfigSpec defines the configmap name and secret name used for connecting to OCI +type OCIConfigSpec struct { + ConfigMapName string `json:"configMapName,omitempty"` + SecretName string `json:"secretName,omitempty"` +} + +// DatabaseObserverStatus defines the observed state of DatabaseObserver +type DatabaseObserverStatus struct { + Conditions []metav1.Condition `json:"conditions"` + Status string `json:"status,omitempty"` + ExporterConfig string `json:"exporterConfig"` + Version string `json:"version"` + Replicas int `json:"replicas,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="dbobserver";"dbobservers" + +// DatabaseObserver is the Schema for the databaseobservers API +// +kubebuilder:printcolumn:JSONPath=".status.exporterConfig",name="ExporterConfig",type=string +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string +// +kubebuilder:printcolumn:JSONPath=".status.version",name="Version",type=string +type DatabaseObserver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DatabaseObserverSpec `json:"spec,omitempty"` + Status DatabaseObserverStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DatabaseObserverList contains a list of DatabaseObserver +type DatabaseObserverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DatabaseObserver `json:"items"` +} + 
+func init() { + SchemeBuilder.Register(&DatabaseObserver{}, &DatabaseObserverList{}) +} diff --git a/apis/observability/v1alpha1/databaseobserver_webhook.go b/apis/observability/v1alpha1/databaseobserver_webhook.go new file mode 100644 index 00000000..585ad3bf --- /dev/null +++ b/apis/observability/v1alpha1/databaseobserver_webhook.go @@ -0,0 +1,185 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "strings" +) + +// log is for logging in this package. 
+var databaseobserverlog = logf.Log.WithName("databaseobserver-resource") + +const ( + AllowedExporterImage = "container-registry.oracle.com/database/observability-exporter" + ErrorSpecValidationMissingConnString = "a required field for database connection string secret is missing or does not have a value" + ErrorSpecValidationMissingDBUser = "a required field for database user secret is missing or does not have a value" + ErrorSpecValidationMissingDBVaultField = "a field for the OCI vault has a value but the other required field is missing or does not have a value" + ErrorSpecValidationMissingOCIConfig = "a field(s) for the OCI Config is missing or does not have a value when fields for the OCI vault has values" + ErrorSpecValidationMissingDBPasswordSecret = "a required field for the database password secret is missing or does not have a value" + ErrorSpecExporterImageNotAllowed = "a different exporter image was found, only official database exporter container images are currently supported" +) + +func (r *DatabaseObserver) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-observability-oracle-com-v1alpha1-databaseobserver,mutating=true,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,verbs=create;update,versions=v1alpha1,name=mdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DatabaseObserver{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DatabaseObserver) Default() { + databaseobserverlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:verbs=create;update,path=/validate-observability-oracle-com-v1alpha1-databaseobserver,mutating=false,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,versions=v1alpha1,name=vdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &DatabaseObserver{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateCreate() (admission.Warnings, error) { + databaseobserverlog.Info("validate create", "name", r.Name) + + var e field.ErrorList + ns := dbcommons.GetWatchNamespaces() + + // Check for namespace/cluster scope access + if _, isDesiredNamespaceWithinScope := ns[r.Namespace]; !isDesiredNamespaceWithinScope && len(ns) > 0 { + e = append(e, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + // Check required secret for db user has value + if r.Spec.Database.DBUser.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbUser").Child("secret"), r.Spec.Database.DBUser.SecretName, + ErrorSpecValidationMissingDBUser)) + } + + // Check required secret for db connection string has value + if r.Spec.Database.DBConnectionString.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbConnectionString").Child("secret"), r.Spec.Database.DBConnectionString.SecretName, + ErrorSpecValidationMissingConnString)) + } + + // The other vault field must have value if one does + if (r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName == "") || + (r.Spec.Database.DBPassword.VaultSecretName != "" && r.Spec.Database.DBPassword.VaultOCID == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword"), r.Spec.Database.DBPassword, + ErrorSpecValidationMissingDBVaultField)) + } 
+ + // if vault fields have value, ociConfig must have values + if r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName != "" && + (r.Spec.OCIConfig.SecretName == "" || r.Spec.OCIConfig.ConfigMapName == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("ociConfig"), r.Spec.OCIConfig, + ErrorSpecValidationMissingOCIConfig)) + } + + // If all of {DB Password Secret Name and vaultOCID+vaultSecretName} have no value, then error out + if r.Spec.Database.DBPassword.SecretName == "" && + r.Spec.Database.DBPassword.VaultOCID == "" && + r.Spec.Database.DBPassword.VaultSecretName == "" { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword").Child("secret"), r.Spec.Database.DBPassword.SecretName, + ErrorSpecValidationMissingDBPasswordSecret)) + } + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil + +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + databaseobserverlog.Info("validate update", "name", r.Name) + var e field.ErrorList + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), 
r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateDelete() (admission.Warnings, error) { + databaseobserverlog.Info("validate delete", "name", r.Name) + + return nil, nil +} diff --git a/apis/observability/v1alpha1/groupversion_info.go b/apis/observability/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..304840f4 --- /dev/null +++ b/apis/observability/v1alpha1/groupversion_info.go @@ -0,0 +1,58 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or 
other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Package v1alpha1 contains API Schema definitions for the observability v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=observability.oracle.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "observability.oracle.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/observability/v1alpha1/zz_generated.deepcopy.go b/apis/observability/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..4b2a29b0 --- /dev/null +++ b/apis/observability/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,481 @@ +//go:build !ignore_autogenerated + +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapDetails) DeepCopyInto(out *ConfigMapDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapDetails. +func (in *ConfigMapDetails) DeepCopy() *ConfigMapDetails { + if in == nil { + return nil + } + out := new(ConfigMapDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecret) DeepCopyInto(out *DBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecret. +func (in *DBSecret) DeepCopy() *DBSecret { + if in == nil { + return nil + } + out := new(DBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecretWithVault) DeepCopyInto(out *DBSecretWithVault) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecretWithVault. +func (in *DBSecretWithVault) DeepCopy() *DBSecretWithVault { + if in == nil { + return nil + } + out := new(DBSecretWithVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserver) DeepCopyInto(out *DatabaseObserver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserver. +func (in *DatabaseObserver) DeepCopy() *DatabaseObserver { + if in == nil { + return nil + } + out := new(DatabaseObserver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverConfigMap) DeepCopyInto(out *DatabaseObserverConfigMap) { + *out = *in + out.Configmap = in.Configmap +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverConfigMap. +func (in *DatabaseObserverConfigMap) DeepCopy() *DatabaseObserverConfigMap { + if in == nil { + return nil + } + out := new(DatabaseObserverConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDatabase) DeepCopyInto(out *DatabaseObserverDatabase) { + *out = *in + out.DBUser = in.DBUser + out.DBPassword = in.DBPassword + out.DBWallet = in.DBWallet + out.DBConnectionString = in.DBConnectionString +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDatabase. 
+func (in *DatabaseObserverDatabase) DeepCopy() *DatabaseObserverDatabase { + if in == nil { + return nil + } + out := new(DatabaseObserverDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDeployment) DeepCopyInto(out *DatabaseObserverDeployment) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ExporterArgs != nil { + in, out := &in.ExporterArgs, &out.ExporterArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterCommands != nil { + in, out := &in.ExporterCommands, &out.ExporterCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterEnvs != nil { + in, out := &in.ExporterEnvs, &out.ExporterEnvs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DeploymentPodTemplate.DeepCopyInto(&out.DeploymentPodTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDeployment. +func (in *DatabaseObserverDeployment) DeepCopy() *DatabaseObserverDeployment { + if in == nil { + return nil + } + out := new(DatabaseObserverDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverExporterConfig) DeepCopyInto(out *DatabaseObserverExporterConfig) { + *out = *in + in.Deployment.DeepCopyInto(&out.Deployment) + in.Service.DeepCopyInto(&out.Service) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverExporterConfig. +func (in *DatabaseObserverExporterConfig) DeepCopy() *DatabaseObserverExporterConfig { + if in == nil { + return nil + } + out := new(DatabaseObserverExporterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverList) DeepCopyInto(out *DatabaseObserverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseObserver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverList. +func (in *DatabaseObserverList) DeepCopy() *DatabaseObserverList { + if in == nil { + return nil + } + out := new(DatabaseObserverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverService) DeepCopyInto(out *DatabaseObserverService) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverService. +func (in *DatabaseObserverService) DeepCopy() *DatabaseObserverService { + if in == nil { + return nil + } + out := new(DatabaseObserverService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverSpec) DeepCopyInto(out *DatabaseObserverSpec) { + *out = *in + out.Database = in.Database + in.Exporter.DeepCopyInto(&out.Exporter) + out.ExporterConfig = in.ExporterConfig + in.Prometheus.DeepCopyInto(&out.Prometheus) + out.OCIConfig = in.OCIConfig + out.Log = in.Log + if in.InheritLabels != nil { + in, out := &in.InheritLabels, &out.InheritLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterSidecars != nil { + in, out := &in.ExporterSidecars, &out.ExporterSidecars + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SideCarVolumes != nil { + in, out := &in.SideCarVolumes, &out.SideCarVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverSpec. 
+func (in *DatabaseObserverSpec) DeepCopy() *DatabaseObserverSpec { + if in == nil { + return nil + } + out := new(DatabaseObserverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverStatus) DeepCopyInto(out *DatabaseObserverStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverStatus. +func (in *DatabaseObserverStatus) DeepCopy() *DatabaseObserverStatus { + if in == nil { + return nil + } + out := new(DatabaseObserverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentPodTemplate) DeepCopyInto(out *DeploymentPodTemplate) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentPodTemplate. +func (in *DeploymentPodTemplate) DeepCopy() *DeploymentPodTemplate { + if in == nil { + return nil + } + out := new(DeploymentPodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfig) DeepCopyInto(out *LogConfig) { + *out = *in + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfig. +func (in *LogConfig) DeepCopy() *LogConfig { + if in == nil { + return nil + } + out := new(LogConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolume) DeepCopyInto(out *LogVolume) { + *out = *in + out.PersistentVolumeClaim = in.PersistentVolumeClaim +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolume. +func (in *LogVolume) DeepCopy() *LogVolume { + if in == nil { + return nil + } + out := new(LogVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolumePVClaim) DeepCopyInto(out *LogVolumePVClaim) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolumePVClaim. +func (in *LogVolumePVClaim) DeepCopy() *LogVolumePVClaim { + if in == nil { + return nil + } + out := new(LogVolumePVClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIConfigSpec. +func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { + if in == nil { + return nil + } + out := new(OCIConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { + *out = *in + in.ServiceMonitor.DeepCopyInto(&out.ServiceMonitor) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusConfig. +func (in *PrometheusConfig) DeepCopy() *PrometheusConfig { + if in == nil { + return nil + } + out := new(PrometheusConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusServiceMonitor) DeepCopyInto(out *PrometheusServiceMonitor) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(monitoringv1.NamespaceSelector) + (*in).DeepCopyInto(*out) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]monitoringv1.Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusServiceMonitor. +func (in *PrometheusServiceMonitor) DeepCopy() *PrometheusServiceMonitor { + if in == nil { + return nil + } + out := new(PrometheusServiceMonitor) + in.DeepCopyInto(out) + return out +} diff --git a/apis/observability/v4/databaseobserver_types.go b/apis/observability/v4/databaseobserver_types.go new file mode 100644 index 00000000..2b9df606 --- /dev/null +++ b/apis/observability/v4/databaseobserver_types.go @@ -0,0 +1,196 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type StatusEnum string + +// DatabaseObserverSpec defines the desired state of DatabaseObserver +type DatabaseObserverSpec struct { + Database DatabaseObserverDatabase `json:"database,omitempty"` + Exporter DatabaseObserverExporterConfig `json:"exporter,omitempty"` + ExporterConfig DatabaseObserverConfigMap `json:"configuration,omitempty"` + Prometheus PrometheusConfig `json:"prometheus,omitempty"` + OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Log LogConfig `json:"log,omitempty"` + InheritLabels []string `json:"inheritLabels,omitempty"` + ExporterSidecars []corev1.Container `json:"sidecars,omitempty"` + SideCarVolumes []corev1.Volume `json:"sidecarVolumes,omitempty"` +} + +// LogConfig defines the configuration details relation to the logs of DatabaseObserver +type LogConfig struct { + Path string `json:"path,omitempty"` + Filename string `json:"filename,omitempty"` + Volume LogVolume `json:"volume,omitempty"` +} + +type LogVolume struct { + Name string `json:"name,omitempty"` + PersistentVolumeClaim LogVolumePVClaim `json:"persistentVolumeClaim,omitempty"` +} + +type LogVolumePVClaim struct { + ClaimName string `json:"claimName,omitempty"` +} + +// DatabaseObserverDatabase defines the database details used for DatabaseObserver +type DatabaseObserverDatabase struct { + DBUser DBSecret `json:"dbUser,omitempty"` + DBPassword DBSecretWithVault `json:"dbPassword,omitempty"` + DBWallet DBSecret `json:"dbWallet,omitempty"` + DBConnectionString DBSecret `json:"dbConnectionString,omitempty"` +} + +// DatabaseObserverExporterConfig defines the configuration details related to the exporters of DatabaseObserver +type DatabaseObserverExporterConfig struct { + Deployment DatabaseObserverDeployment 
`json:"deployment,omitempty"` + Service DatabaseObserverService `json:"service,omitempty"` +} + +// DatabaseObserverDeployment defines the exporter deployment component of DatabaseObserver +type DatabaseObserverDeployment struct { + ExporterImage string `json:"image,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + ExporterArgs []string `json:"args,omitempty"` + ExporterCommands []string `json:"commands,omitempty"` + ExporterEnvs map[string]string `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + DeploymentPodTemplate DeploymentPodTemplate `json:"podTemplate,omitempty"` +} + +// DeploymentPodTemplate defines the labels for the DatabaseObserver pods component of a deployment +type DeploymentPodTemplate struct { + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// DatabaseObserverService defines the exporter service component of DatabaseObserver +type DatabaseObserverService struct { + Ports []corev1.ServicePort `json:"ports,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// PrometheusConfig defines the generated resources for Prometheus +type PrometheusConfig struct { + ServiceMonitor PrometheusServiceMonitor `json:"serviceMonitor,omitempty"` +} + +// PrometheusServiceMonitor defines DatabaseObserver servicemonitor spec +type PrometheusServiceMonitor struct { + Labels map[string]string `json:"labels,omitempty"` + NamespaceSelector *monitorv1.NamespaceSelector `json:"namespaceSelector,omitempty"` + Endpoints []monitorv1.Endpoint `json:"endpoints,omitempty"` +} + +// DBSecret defines secrets used in reference +type DBSecret struct { + Key string `json:"key,omitempty"` + SecretName string `json:"secret,omitempty"` +} + +// DBSecretWithVault defines secrets used in reference with vault fields +type DBSecretWithVault struct { + Key string `json:"key,omitempty"` + SecretName string 
`json:"secret,omitempty"` + VaultOCID string `json:"vaultOCID,omitempty"` + VaultSecretName string `json:"vaultSecretName,omitempty"` +} + +// DatabaseObserverConfigMap defines configMap used for metrics configuration +type DatabaseObserverConfigMap struct { + Configmap ConfigMapDetails `json:"configMap,omitempty"` +} + +// ConfigMapDetails defines the configmap name +type ConfigMapDetails struct { + Key string `json:"key,omitempty"` + Name string `json:"name,omitempty"` +} + +// OCIConfigSpec defines the configmap name and secret name used for connecting to OCI +type OCIConfigSpec struct { + ConfigMapName string `json:"configMapName,omitempty"` + SecretName string `json:"secretName,omitempty"` +} + +// DatabaseObserverStatus defines the observed state of DatabaseObserver +type DatabaseObserverStatus struct { + Conditions []metav1.Condition `json:"conditions"` + Status string `json:"status,omitempty"` + ExporterConfig string `json:"exporterConfig"` + Version string `json:"version"` + Replicas int `json:"replicas,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="dbobserver";"dbobservers" + +// DatabaseObserver is the Schema for the databaseobservers API +// +kubebuilder:printcolumn:JSONPath=".status.exporterConfig",name="ExporterConfig",type=string +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string +// +kubebuilder:printcolumn:JSONPath=".status.version",name="Version",type=string +// +kubebuilder:storageversion +type DatabaseObserver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DatabaseObserverSpec `json:"spec,omitempty"` + Status DatabaseObserverStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DatabaseObserverList contains a list of DatabaseObserver +type DatabaseObserverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]DatabaseObserver `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DatabaseObserver{}, &DatabaseObserverList{}) +} diff --git a/apis/observability/v4/databaseobserver_webhook.go b/apis/observability/v4/databaseobserver_webhook.go new file mode 100644 index 00000000..c0a5d8b7 --- /dev/null +++ b/apis/observability/v4/databaseobserver_webhook.go @@ -0,0 +1,182 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "strings" +) + +// log is for logging in this package. 
+var databaseobserverlog = logf.Log.WithName("databaseobserver-resource") + +const ( + AllowedExporterImage = "container-registry.oracle.com/database/observability-exporter" + ErrorSpecValidationMissingConnString = "a required field for database connection string secret is missing or does not have a value" + ErrorSpecValidationMissingDBUser = "a required field for database user secret is missing or does not have a value" + ErrorSpecValidationMissingDBVaultField = "a field for the OCI vault has a value but the other required field is missing or does not have a value" + ErrorSpecValidationMissingOCIConfig = "a field(s) for the OCI Config is missing or does not have a value when fields for the OCI vault has values" + ErrorSpecValidationMissingDBPasswordSecret = "a required field for the database password secret is missing or does not have a value" + ErrorSpecExporterImageNotAllowed = "a different exporter image was found, only official database exporter container images are currently supported" +) + +func (r *DatabaseObserver) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-observability-oracle-com-v4-databaseobserver,mutating=true,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,verbs=create;update,versions=v4,name=mdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DatabaseObserver{} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DatabaseObserver) Default() { + databaseobserverlog.Info("default", "name", r.Name) +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-observability-oracle-com-v4-databaseobserver,mutating=false,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,versions=v4,name=vdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &DatabaseObserver{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateCreate() (admission.Warnings, error) { + databaseobserverlog.Info("validate create", "name", r.Name) + + var e field.ErrorList + ns := dbcommons.GetWatchNamespaces() + + // Check for namespace/cluster scope access + if _, isDesiredNamespaceWithinScope := ns[r.Namespace]; !isDesiredNamespaceWithinScope && len(ns) > 0 { + e = append(e, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + // Check required secret for db user has value + if r.Spec.Database.DBUser.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbUser").Child("secret"), r.Spec.Database.DBUser.SecretName, + ErrorSpecValidationMissingDBUser)) + } + + // Check required secret for db connection string has value + if r.Spec.Database.DBConnectionString.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbConnectionString").Child("secret"), r.Spec.Database.DBConnectionString.SecretName, + ErrorSpecValidationMissingConnString)) + } + + // The other vault field must have value if one does + if (r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName == "") || + (r.Spec.Database.DBPassword.VaultSecretName != "" && r.Spec.Database.DBPassword.VaultOCID == 
"") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword"), r.Spec.Database.DBPassword, + ErrorSpecValidationMissingDBVaultField)) + } + + // if vault fields have value, ociConfig must have values + if r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName != "" && + (r.Spec.OCIConfig.SecretName == "" || r.Spec.OCIConfig.ConfigMapName == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("ociConfig"), r.Spec.OCIConfig, + ErrorSpecValidationMissingOCIConfig)) + } + + // If all of {DB Password Secret Name and vaultOCID+vaultSecretName} have no value, then error out + if r.Spec.Database.DBPassword.SecretName == "" && + r.Spec.Database.DBPassword.VaultOCID == "" && + r.Spec.Database.DBPassword.VaultSecretName == "" { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword").Child("secret"), r.Spec.Database.DBPassword.SecretName, + ErrorSpecValidationMissingDBPasswordSecret)) + } + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil + +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + databaseobserverlog.Info("validate update", "name", r.Name) + var e field.ErrorList + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && 
!strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateDelete() (admission.Warnings, error) { + databaseobserverlog.Info("validate delete", "name", r.Name) + + return nil, nil +} diff --git a/apis/observability/v4/groupversion_info.go b/apis/observability/v4/groupversion_info.go new file mode 100644 index 00000000..155b1c11 --- /dev/null +++ b/apis/observability/v4/groupversion_info.go @@ -0,0 +1,58 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for 
sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Package v4 contains API Schema definitions for the observability v4 API group +// +kubebuilder:object:generate=true +// +groupName=observability.oracle.com +package v4 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "observability.oracle.com", Version: "v4"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/observability/v4/zz_generated.deepcopy.go b/apis/observability/v4/zz_generated.deepcopy.go new file mode 100644 index 00000000..d9892643 --- /dev/null +++ b/apis/observability/v4/zz_generated.deepcopy.go @@ -0,0 +1,481 @@ +//go:build !ignore_autogenerated + +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v4 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapDetails) DeepCopyInto(out *ConfigMapDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapDetails. +func (in *ConfigMapDetails) DeepCopy() *ConfigMapDetails { + if in == nil { + return nil + } + out := new(ConfigMapDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecret) DeepCopyInto(out *DBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecret. +func (in *DBSecret) DeepCopy() *DBSecret { + if in == nil { + return nil + } + out := new(DBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecretWithVault) DeepCopyInto(out *DBSecretWithVault) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecretWithVault. +func (in *DBSecretWithVault) DeepCopy() *DBSecretWithVault { + if in == nil { + return nil + } + out := new(DBSecretWithVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserver) DeepCopyInto(out *DatabaseObserver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserver. +func (in *DatabaseObserver) DeepCopy() *DatabaseObserver { + if in == nil { + return nil + } + out := new(DatabaseObserver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverConfigMap) DeepCopyInto(out *DatabaseObserverConfigMap) { + *out = *in + out.Configmap = in.Configmap +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverConfigMap. +func (in *DatabaseObserverConfigMap) DeepCopy() *DatabaseObserverConfigMap { + if in == nil { + return nil + } + out := new(DatabaseObserverConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDatabase) DeepCopyInto(out *DatabaseObserverDatabase) { + *out = *in + out.DBUser = in.DBUser + out.DBPassword = in.DBPassword + out.DBWallet = in.DBWallet + out.DBConnectionString = in.DBConnectionString +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDatabase. 
+func (in *DatabaseObserverDatabase) DeepCopy() *DatabaseObserverDatabase { + if in == nil { + return nil + } + out := new(DatabaseObserverDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDeployment) DeepCopyInto(out *DatabaseObserverDeployment) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ExporterArgs != nil { + in, out := &in.ExporterArgs, &out.ExporterArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterCommands != nil { + in, out := &in.ExporterCommands, &out.ExporterCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterEnvs != nil { + in, out := &in.ExporterEnvs, &out.ExporterEnvs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DeploymentPodTemplate.DeepCopyInto(&out.DeploymentPodTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDeployment. +func (in *DatabaseObserverDeployment) DeepCopy() *DatabaseObserverDeployment { + if in == nil { + return nil + } + out := new(DatabaseObserverDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverExporterConfig) DeepCopyInto(out *DatabaseObserverExporterConfig) { + *out = *in + in.Deployment.DeepCopyInto(&out.Deployment) + in.Service.DeepCopyInto(&out.Service) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverExporterConfig. +func (in *DatabaseObserverExporterConfig) DeepCopy() *DatabaseObserverExporterConfig { + if in == nil { + return nil + } + out := new(DatabaseObserverExporterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverList) DeepCopyInto(out *DatabaseObserverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseObserver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverList. +func (in *DatabaseObserverList) DeepCopy() *DatabaseObserverList { + if in == nil { + return nil + } + out := new(DatabaseObserverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverService) DeepCopyInto(out *DatabaseObserverService) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverService. +func (in *DatabaseObserverService) DeepCopy() *DatabaseObserverService { + if in == nil { + return nil + } + out := new(DatabaseObserverService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverSpec) DeepCopyInto(out *DatabaseObserverSpec) { + *out = *in + out.Database = in.Database + in.Exporter.DeepCopyInto(&out.Exporter) + out.ExporterConfig = in.ExporterConfig + in.Prometheus.DeepCopyInto(&out.Prometheus) + out.OCIConfig = in.OCIConfig + out.Log = in.Log + if in.InheritLabels != nil { + in, out := &in.InheritLabels, &out.InheritLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterSidecars != nil { + in, out := &in.ExporterSidecars, &out.ExporterSidecars + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SideCarVolumes != nil { + in, out := &in.SideCarVolumes, &out.SideCarVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverSpec. 
+func (in *DatabaseObserverSpec) DeepCopy() *DatabaseObserverSpec { + if in == nil { + return nil + } + out := new(DatabaseObserverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverStatus) DeepCopyInto(out *DatabaseObserverStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverStatus. +func (in *DatabaseObserverStatus) DeepCopy() *DatabaseObserverStatus { + if in == nil { + return nil + } + out := new(DatabaseObserverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentPodTemplate) DeepCopyInto(out *DeploymentPodTemplate) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentPodTemplate. +func (in *DeploymentPodTemplate) DeepCopy() *DeploymentPodTemplate { + if in == nil { + return nil + } + out := new(DeploymentPodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfig) DeepCopyInto(out *LogConfig) { + *out = *in + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfig. +func (in *LogConfig) DeepCopy() *LogConfig { + if in == nil { + return nil + } + out := new(LogConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolume) DeepCopyInto(out *LogVolume) { + *out = *in + out.PersistentVolumeClaim = in.PersistentVolumeClaim +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolume. +func (in *LogVolume) DeepCopy() *LogVolume { + if in == nil { + return nil + } + out := new(LogVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolumePVClaim) DeepCopyInto(out *LogVolumePVClaim) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolumePVClaim. +func (in *LogVolumePVClaim) DeepCopy() *LogVolumePVClaim { + if in == nil { + return nil + } + out := new(LogVolumePVClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIConfigSpec. +func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { + if in == nil { + return nil + } + out := new(OCIConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { + *out = *in + in.ServiceMonitor.DeepCopyInto(&out.ServiceMonitor) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusConfig. +func (in *PrometheusConfig) DeepCopy() *PrometheusConfig { + if in == nil { + return nil + } + out := new(PrometheusConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusServiceMonitor) DeepCopyInto(out *PrometheusServiceMonitor) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(monitoringv1.NamespaceSelector) + (*in).DeepCopyInto(*out) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]monitoringv1.Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusServiceMonitor. +func (in *PrometheusServiceMonitor) DeepCopy() *PrometheusServiceMonitor { + if in == nil { + return nil + } + out := new(PrometheusServiceMonitor) + in.DeepCopyInto(out) + return out +} diff --git a/bundle.Dockerfile b/bundle.Dockerfile index 4ca3c52f..d591c4ef 100644 --- a/bundle.Dockerfile +++ b/bundle.Dockerfile @@ -1,4 +1,4 @@ -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# diff --git a/commons/adb_family/utils.go b/commons/adb_family/utils.go new file mode 100644 index 00000000..591b3130 --- /dev/null +++ b/commons/adb_family/utils.go @@ -0,0 +1,73 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+** SOFTWARE.
+ */
+
+package adbfamily
+
+import (
+	dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4"
+	"github.com/oracle/oracle-database-operator/commons/k8s"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// VerifyTargetAdb searches for the target ADB in the cluster.
+// The function returns two values in the following order:
+// ownerAdb: the AutonomousDatabase resource of the target ADB if it is found in the cluster
+// err: a non-nil error if the target ADB cannot be fetched; otherwise nil
+func VerifyTargetAdb(kubeClient client.Client, target dbv4.TargetSpec, namespace string) (*dbv4.AutonomousDatabase, error) {
+	var err error
+	var ownerAdb *dbv4.AutonomousDatabase
+
+	// Get the target ADB OCID
+	if target.K8sAdb.Name != nil {
+		// Find the target ADB using the name of the k8s ADB
+		ownerAdb = &dbv4.AutonomousDatabase{}
+		if err := k8s.FetchResource(kubeClient, namespace, *target.K8sAdb.Name, ownerAdb); err != nil {
+			return nil, err
+		}
+
+	} else {
+		// Find the target ADB using the ADB OCID
+		ownerAdb, err = k8s.FetchAutonomousDatabaseWithOCID(kubeClient, namespace, *target.OciAdb.OCID)
+		if err != nil {
+			return nil, err
+		}
+
+	}
+
+	return ownerAdb, nil
+}
diff --git a/commons/annotations/annotations.go b/commons/annotations/annotations.go
index aa4c4e15..b0196156 100644
--- a/commons/annotations/annotations.go
+++ b/commons/annotations/annotations.go
@@ -1,5 +1,5 @@
 /*
-** Copyright (c) 2021 Oracle and/or its affiliates.
+** Copyright (c) 2022 Oracle and/or its affiliates.
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -53,8 +53,9 @@ type PatchValue struct { Value interface{} `json:"value"` } -// SetAnnotations attaches the given metadata to the target object -func SetAnnotations(kubeClient client.Client, obj client.Object, anns map[string]string) error { +// PatchAnnotations attaches the given metadata to the target object +// The obj will be updated with the content returned by the cluster +func PatchAnnotations(kubeClient client.Client, obj client.Object, anns map[string]string) error { payload := []PatchValue{} if obj.GetAnnotations() == nil { diff --git a/commons/database/constants.go b/commons/database/constants.go index fac4719f..940a2727 100644 --- a/commons/database/constants.go +++ b/commons/database/constants.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -38,13 +38,19 @@ package commons +const CONTAINER_LISTENER_PORT int32 = 1521 + +const CONTAINER_TCPS_PORT int32 = 2484 + const ORACLE_UID int64 = 54321 const ORACLE_GUID int64 = 54321 const DBA_GUID int64 = 54322 -const NoCloneRef string = "Unavailable" +const SQLPlusCLI string = "sqlplus -s / as sysdba" + +const SQLCLI string = "sql -s / as sysdba" const GetVersionSQL string = "SELECT VERSION_FULL FROM V\\$INSTANCE;" @@ -58,9 +64,6 @@ const RemoveChkFileCMD string = "rm -f \"${ORACLE_BASE}/oradata/.${ORACLE_SID}.n const CreateDBRecoveryDestCMD string = "mkdir -p ${ORACLE_BASE}/oradata/fast_recovery_area" -const ConfigureOEMSQL string = "exec DBMS_XDB_CONFIG.SETHTTPSPORT(5500);" + - "\nalter system register;" - const SetDBRecoveryDestSQL string = "SHOW PARAMETER db_recovery_file_dest;" + "\nALTER SYSTEM SET db_recovery_file_dest_size=50G scope=both sid='*';" + "\nALTER SYSTEM SET db_recovery_file_dest='${ORACLE_BASE}/oradata/fast_recovery_area' scope=both sid='*';" + @@ -91,25 +94,286 @@ const ArchiveLogFalseCMD 
string = CreateChkFileCMD + " && " + "echo -e \"SHUTDOWN IMMEDIATE; \n STARTUP MOUNT; \n ALTER DATABASE NOARCHIVELOG; \n SELECT log_mode FROM v\\$database; \n ALTER DATABASE OPEN;" + " \n ALTER PLUGGABLE DATABASE ALL OPEN; \n ALTER SYSTEM REGISTER;\" | %s && " + RemoveChkFileCMD -const GetDatabaseRoleCMD string = "SELECT DATABASE_ROLE FROM V\\$DATABASE; " +const StandbyDatabasePrerequisitesSQL string = "ALTER SYSTEM SET db_create_file_dest='/opt/oracle/oradata/';" + + "\nALTER SYSTEM SET db_create_online_log_dest_1='/opt/oracle/oradata/';" + + "\nALTER SYSTEM SWITCH LOGFILE;" + + "\nALTER DATABASE ADD STANDBY LOGFILE THREAD 1 SIZE 200M;" + + "\nALTER DATABASE ADD STANDBY LOGFILE THREAD 1 SIZE 200M;" + + "\nALTER DATABASE ADD STANDBY LOGFILE THREAD 1 SIZE 200M;" + + "\nALTER DATABASE ADD STANDBY LOGFILE THREAD 1 SIZE 200M;" + + "\nALTER SYSTEM SET STANDBY_FILE_MANAGEMENT=AUTO;" + + "\nALTER SYSTEM SET dg_broker_config_file1='/opt/oracle/oradata/dbconfig/dr1${ORACLE_SID}.dat' scope=both;" + + "\nALTER SYSTEM SET dg_broker_config_file2='/opt/oracle/oradata/dbconfig/dr2${ORACLE_SID}.dat';" + + "\nALTER SYSTEM SET dg_broker_start=TRUE;" + +const GetDBOpenMode string = "select open_mode from v\\$database;" + +const ModifyStdbyDBOpenMode string = "alter database recover managed standby database disconnect;" + +const StandbyTnsnamesEntry string = ` +##STANDBYDATABASE_SID## = +(DESCRIPTION = + (ADDRESS = (PROTOCOL = TCP)(HOST = ##STANDBYDATABASE_SERVICE_EXPOSED## )(PORT = 1521)) + (CONNECT_DATA = + (SERVER = DEDICATED) + (SERVICE_NAME = ##STANDBYDATABASE_SID##) + ) +) +` +const PDBTnsnamesEntry string = ` +##PDB_NAME## = +(DESCRIPTION = + (ADDRESS = (PROTOCOL = TCP)(HOST = 0.0.0.0 )(PORT = 1521)) + (CONNECT_DATA = + (SERVER = DEDICATED) + (SERVICE_NAME = ##PDB_NAME##) + ) +) +` + +const PrimaryTnsnamesEntry string = ` +${PRIMARY_SID} = + (DESCRIPTION = + (ADDRESS = (PROTOCOL = TCP)(HOST = ${PRIMARY_IP})(PORT = 1521 )) + (CONNECT_DATA = + (SERVER = DEDICATED) + (SERVICE_NAME 
= ${PRIMARY_SID}) + ) + ) + ` + +const ListenerEntry string = `LISTENER = +(DESCRIPTION_LIST = + (DESCRIPTION = + (ADDRESS = (PROTOCOL = IPC)(KEY = EXTPROC1)) + (ADDRESS = (PROTOCOL = TCP)(HOST = 0.0.0.0)(PORT = 1521)) + ) +) +SID_LIST_LISTENER = + (SID_LIST = + (SID_DESC = + (GLOBAL_DBNAME = ${ORACLE_SID^^}) + (SID_NAME = ${ORACLE_SID^^}) + (ORACLE_HOME = ${ORACLE_HOME}) + ) + (SID_DESC = + (GLOBAL_DBNAME = DATAGUARD) + (SID_NAME = ${ORACLE_SID^^}) + (ORACLE_HOME = ${ORACLE_HOME}) + ) + (SID_DESC = + (GLOBAL_DBNAME = ${ORACLE_SID^^}_DGMGRL) + (SID_NAME = ${ORACLE_SID^^}) + (ORACLE_HOME = ${ORACLE_HOME}) + (ENVS="TNS_ADMIN=/opt/oracle/oradata/dbconfig/${ORACLE_SID^^}") + ) + ) + +DEDICATED_THROUGH_BROKER_LISTENER=ON +` + +const CreateAdminPasswordFile string = "umask 177\n cat > admin.pwd < dgmgrl.cmd\n umask 022" + +const RemoveAdminPasswordFile string = "rm -rf admin.pwd" + +const RemoveDGMGRLScriptFile string = "rm -rf dgmgrl.cmd" + +const DataguardBrokerMaxPerformanceCMD string = "CREATE CONFIGURATION dg_config AS PRIMARY DATABASE IS ${PRIMARY_SID} CONNECT IDENTIFIER IS ${PRIMARY_DB_CONN_STR};" + + "\nADD DATABASE ${ORACLE_SID} AS CONNECT IDENTIFIER IS ${SVC_HOST}:1521/${ORACLE_SID} MAINTAINED AS PHYSICAL;" + + "\nEDIT DATABASE ${PRIMARY_SID} SET PROPERTY LogXptMode='ASYNC';" + + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY LogXptMode='ASYNC';" + + "\nEDIT CONFIGURATION SET PROTECTION MODE AS MAXPERFORMANCE;" + + "\nENABLE CONFIGURATION;" + +const DataguardBrokerMaxAvailabilityCMD string = "CREATE CONFIGURATION dg_config AS PRIMARY DATABASE IS ${PRIMARY_SID} CONNECT IDENTIFIER IS ${PRIMARY_DB_CONN_STR};" + + "\nADD DATABASE ${ORACLE_SID} AS CONNECT IDENTIFIER IS ${SVC_HOST}:1521/${ORACLE_SID} MAINTAINED AS PHYSICAL;" + + "\nEDIT DATABASE ${PRIMARY_SID} SET PROPERTY LogXptMode='SYNC';" + + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY LogXptMode='SYNC';" + + "\nEDIT CONFIGURATION SET PROTECTION MODE AS MAXAVAILABILITY;" + + "\nENABLE CONFIGURATION;" + +const 
DataguardBrokerAddDBMaxPerformanceCMD string = "ADD DATABASE ${ORACLE_SID} AS CONNECT IDENTIFIER IS ${SVC_HOST}:1521/${ORACLE_SID} MAINTAINED AS PHYSICAL;" + + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY LogXptMode='ASYNC';" + + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY STATICCONNECTIDENTIFIER='(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=${SVC_HOST})(PORT=1521))" + + "(CONNECT_DATA=(SERVICE_NAME=${ORACLE_SID}_DGMGRL)(INSTANCE_NAME=${ORACLE_SID})(SERVER=DEDICATED)))';" + + "\nENABLE CONFIGURATION;" + +const DataguardBrokerAddDBMaxAvailabilityCMD string = "ADD DATABASE ${ORACLE_SID} AS CONNECT IDENTIFIER IS ${SVC_HOST}:1521/${ORACLE_SID} MAINTAINED AS PHYSICAL;" + + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY LogXptMode='SYNC';" + + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY STATICCONNECTIDENTIFIER='(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=${SVC_HOST})(PORT=1521))" + + "(CONNECT_DATA=(SERVICE_NAME=${ORACLE_SID}_DGMGRL)(INSTANCE_NAME=${ORACLE_SID})(SERVER=DEDICATED)))';" + + "\nENABLE CONFIGURATION;" + +const RemoveStandbyDBFromDGConfgCMD string = "DISABLE DATABASE ${ORACLE_SID};" + + "\nREMOVE DATABASE ${ORACLE_SID};" + +const DBShowConfigCMD string = "SHOW CONFIGURATION;" const DataguardBrokerGetDatabaseCMD string = "SELECT DATABASE || ':' || DATAGUARD_ROLE AS DATABASE FROM V\\$DG_BROKER_CONFIG;" +const EnableFSFOCMD string = "ENABLE FAST_START FAILOVER;" + +const DisableFSFOCMD string = "STOP OBSERVER %s" + + "\nDISABLE FAST_START FAILOVER;" + +const RemoveDataguardConfiguration string = "DISABLE FAST_START FAILOVER;" + + "\nEDIT CONFIGURATION SET PROTECTION MODE AS MAXPERFORMANCE;" + + "\nREMOVE CONFIGURATION;" + +const GetDatabaseRoleCMD string = "SELECT DATABASE_ROLE FROM V\\$DATABASE; " + const RunDatapatchCMD string = " ( while true; do sleep 60; echo \"Installing patches...\" ; done ) & if ! 
$ORACLE_HOME/OPatch/datapatch -skip_upgrade_check;" + " then echo \"Datapatch execution has failed.\" ; else echo \"DONE: Datapatch execution.\" ; fi ; kill -9 $!;" -const GetSqlpatchDescriptionSQL string = "select TARGET_VERSION || ' (' || PATCH_ID || ')' as patchinfo from dba_registry_sqlpatch order by action_time desc;" +const GetSqlpatchDescriptionSQL string = "select TARGET_VERSION || ' (' || ACTION || ' of ' || PATCH_ID || ')' as patchinfo from dba_registry_sqlpatch order by action_time desc;" const GetSqlpatchStatusSQL string = "select status from dba_registry_sqlpatch order by action_time desc;" const GetSqlpatchVersionSQL string = "select SOURCE_VERSION || ':' || TARGET_VERSION as versions from dba_registry_sqlpatch order by action_time desc;" -const GetCheckpointFileCMD string = "find ${ORACLE_BASE}/oradata -name .${ORACLE_SID}${CHECKPOINT_FILE_EXTN} " +const GetCheckpointFileCMD string = "find ${ORACLE_BASE}/oradata -maxdepth 1 -name .${ORACLE_SID}${CHECKPOINT_FILE_EXTN}" const GetEnterpriseEditionFileCMD string = "if [ -f ${ORACLE_BASE}/oradata/dbconfig/$ORACLE_SID/.docker_enterprise ]; then ls ${ORACLE_BASE}/oradata/dbconfig/$ORACLE_SID/.docker_enterprise; fi " const GetStandardEditionFileCMD string = "if [ -f ${ORACLE_BASE}/oradata/dbconfig/$ORACLE_SID/.docker_standard ]; then ls ${ORACLE_BASE}/oradata/dbconfig/$ORACLE_SID/.docker_standard; fi " +const CreateSIDlinkCMD string = "cd ${ORACLE_BASE}/oradata && test ! 
-e $ORACLE_SID && ln -s $(basename $PRIMARY_DB_CONN_STR)/$ORACLE_SID" + +const GetPdbsSQL string = "select name from v\\$pdbs where name not like 'PDB\\$SEED' and open_mode like 'READ WRITE';" + +const OpenPDBSeed = "alter pluggable database pdb\\$seed close;" + + "\nalter pluggable database pdb\\$seed open read only;" + +const SetAdminUsersSQL string = "CREATE USER C##DBAPI_CDB_ADMIN IDENTIFIED BY \\\"%[1]s\\\" ACCOUNT UNLOCK CONTAINER=ALL;" + + "\nalter user C##DBAPI_CDB_ADMIN identified by \\\"%[1]s\\\" account unlock;" + + "\nGRANT DBA TO C##DBAPI_CDB_ADMIN CONTAINER = ALL;" + + "\nGRANT PDB_DBA TO C##DBAPI_CDB_ADMIN CONTAINER = ALL;" + + "\nCREATE USER C##_DBAPI_PDB_ADMIN IDENTIFIED BY \\\"%[1]s\\\" CONTAINER=ALL ACCOUNT UNLOCK;" + + "\nalter user C##_DBAPI_PDB_ADMIN identified by \\\"%[1]s\\\" account unlock;" + + "\nGRANT DBA TO C##_DBAPI_PDB_ADMIN CONTAINER = ALL;" + + "\nalter pluggable database pdb\\$seed close;" + + "\nalter pluggable database pdb\\$seed open read write force;" + +const GetUserORDSSchemaStatusSQL string = "alter session set container=%[2]s;" + + "\nselect 'STATUS:'||status as status from ords_metadata.ords_schemas where upper(parsing_schema) = upper('%[1]s');" + +const CreateORDSSchemaSQL = "\nALTER SESSION SET CONTAINER=%[3]s;" + + "\nCREATE USER %[1]s IDENTIFIED BY \\\"%[2]s\\\";" + + "\nGRANT CONNECT, RESOURCE, DBA, PDB_DBA TO %[1]s;" + +const EnableORDSSchemaSQL string = "\nALTER SESSION SET CONTAINER=%[4]s;" + + "\nGRANT INHERIT PRIVILEGES ON USER SYS TO ORDS_METADATA;" + + "\nexec ORDS.enable_schema(p_enabled => %[2]s ,p_schema => '%[1]s',p_url_mapping_type => 'BASE_PATH',p_url_mapping_pattern => '%[3]s',p_auto_rest_auth => FALSE);" + + // SetupORDSCMD is run only for the FIRST TIME, ORDS is installed. Once ORDS is installed, we delete the pod that ran SetupORDSCMD and create new ones. + // Newly created pod doesn't run this SetupORDSCMD. 
+const SetupORDSCMD string = "$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property database.api.enabled true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.auth.enabled true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property database.api.management.services.disabled false" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property database.api.admin.enabled true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property dbc.auth.enabled true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property restEnabledSql.active true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property db.serviceNameSuffix \"\" " + // Mandatory when ORDS Installing at CDB Level -> Maps PDB's + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.InitialLimit 5" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.MaxLimit 20" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.InactivityTimeout 300" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property feature.sdw true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property security.verifySSL false" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.maxRows 1000" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property pdb.open.asneeded true" + + "\numask 177" + + "\necho db.cdb.adminUser=C##DBAPI_CDB_ADMIN AS SYSDBA > cdbAdmin.properties" + + "\necho db.cdb.adminUser.password=\"%[4]s\" >> cdbAdmin.properties" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_pu cdbAdmin.properties" + + "\nrm -f cdbAdmin.properties" + + "\necho db.username=APEX_LISTENER > apexlistener" + + "\necho db.password=\"%[2]s\" >> apexlistener" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_al apexlistener" + + "\nrm -f apexlistener" + + "\necho db.username=APEX_REST_PUBLIC_USER > apexRestPublicUser" + + "\necho db.password=\"%[2]s\" 
>> apexRestPublicUser" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_rt apexRestPublicUser" + + "\nrm -f apexRestPublicUser" + + "\necho db.username=APEX_PUBLIC_USER > apexPublicUser" + + "\necho db.password=\"%[2]s\" >> apexPublicUser" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex apexPublicUser" + + "\nrm -f apexPublicUser" + + "\necho db.adminUser=C##_DBAPI_PDB_ADMIN > pdbAdmin.properties" + + "\necho db.adminUser.password=\"%[4]s\">> pdbAdmin.properties" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_pu pdbAdmin.properties" + + "\nrm -f pdbAdmin.properties" + + "\necho -e \"%[1]s\n%[1]s\" > sqladmin.passwd" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war user ${ORDS_USER} \"SQL Administrator , System Administrator , SQL Developer , oracle.dbtools.autorest.any.schema \" < sqladmin.passwd" + + "\nrm -f sqladmin.passwd" + + "\numask 022" + + "\nsed -i 's,jetty.port=8888,jetty.secure.port=8443\\nssl.cert=\\nssl.cert.key=\\nssl.host=%[3]s,g' /opt/oracle/ords/config/ords/standalone/standalone.properties " + + "\nsed -i 's,standalone.static.path=/opt/oracle/ords/doc_root/i,standalone.static.path=/opt/oracle/ords/config/apex/images,g' /opt/oracle/ords/config/ords/standalone/standalone.properties" + +const InitORDSCMD string = "if [ -f $ORDS_HOME/config/ords/defaults.xml ]; then exit ;fi;" + + "\nexport APEXI=$ORDS_HOME/config/apex/images" + + "\n$ORDS_HOME/runOrds.sh --setuponly" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property database.api.enabled true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.auth.enabled true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property database.api.management.services.disabled false" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property database.api.admin.enabled true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property dbc.auth.enabled true" + + 
"\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property restEnabledSql.active true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property db.serviceNameSuffix \"\" " + // Mandatory when ORDS Installing at CDB Level -> Maps PDB's + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.InitialLimit 5" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.MaxLimit 20" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.InactivityTimeout 300" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property feature.sdw true" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property security.verifySSL false" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-property jdbc.maxRows 1000" + + "\nmkdir -p $ORDS_HOME/config/ords/conf" + + "\numask 177" + + "\necho db.cdb.adminUser=C##DBAPI_CDB_ADMIN AS SYSDBA > cdbAdmin.properties" + + "\necho db.cdb.adminUser.password=\"${ORACLE_PWD}\" >> cdbAdmin.properties" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_pu cdbAdmin.properties" + + "\nrm -f cdbAdmin.properties" + + "\necho db.adminUser=C##_DBAPI_PDB_ADMIN > pdbAdmin.properties" + + "\necho db.adminUser.password=\"${ORACLE_PWD}\">> pdbAdmin.properties" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_pu pdbAdmin.properties" + + "\nrm -f pdbAdmin.properties" + + "\necho -e \"${ORDS_PWD}\n${ORDS_PWD}\" > sqladmin.passwd" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war user ${ORDS_USER} \"SQL Administrator , System Administrator , SQL Developer , oracle.dbtools.autorest.any.schema \" < sqladmin.passwd" + + "\nrm -f sqladmin.passwd" + + "\numask 022" + +const DbConnectString string = "CONN_STRING=sys/%[1]s@%[2]s:1521/%[3]s" + +const GetSessionInfoSQL string = "select s.sid || ',' || s.serial# as Info FROM v\\$session s, v\\$process p " + + "WHERE (s.username = 'ORDS_PUBLIC_USER' or " + + "s.username = 'APEX_PUBLIC_USER' or " + + "s.username 
= 'APEX_REST_PUBLIC_USER' or " + + "s.username = 'APEX_LISTENER' or " + + "s.username = 'C##_DBAPI_CDB_ADMIN' or " + + "s.username = 'C##_DBAPI_PDB_ADMIN' ) AND p.addr(+) = s.paddr;" + +const KillSessionSQL string = "alter system kill session '%[1]s';" + +const DropAdminUsersSQL string = "drop user C##DBAPI_CDB_ADMIN cascade;" + + "\ndrop user C##_DBAPI_PDB_ADMIN cascade;" + +const UninstallORDSCMD string = "\numask 177" + + "\necho -e \"1\n${ORACLE_HOST}\n${ORACLE_PORT}\n1\n${ORACLE_SERVICE}\nsys\n%[1]s\n%[1]s\n1\" > ords.cred" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war uninstall advanced < ords.cred" + + "\nrm -f ords.cred" + + "\numask 022" + + "\nrm -f /opt/oracle/ords/config/ords/defaults.xml" + + "\nrm -f /opt/oracle/ords/config/ords/credentials" + + "\nrm -rf /opt/oracle/ords/config/ords/conf" + + "\nrm -rf /opt/oracle/ords/config/ords/standalone" + + "\nrm -rf /opt/oracle/ords/config/ords/apex" + +const GetORDSStatus string = "curl -sSkvf -k -X GET http://localhost:8181/ords/_/db-api/stable/metadata-catalog/" + +const ORDSReadinessProbe string = "curl -sSkvf -k -X GET http://localhost:8181/ords/_/landing" + +const ValidateAdminPassword string = "conn sys/\\\"%s\\\"@${ORACLE_SID} as sysdba\nshow user" + const ReconcileError string = "ReconcileError" const ReconcileErrorReason string = "LastReconcileCycleFailed" @@ -140,7 +404,9 @@ const StatusReady string = "Healthy" const StatusError string = "Error" -const ValueUnavailable string = "Unknown" +const StatusUnknown string = "Unknown" + +const ValueUnavailable string = "Unavailable" const NoExternalIp string = "Node ExternalIP unavailable" @@ -156,22 +422,137 @@ const WalletEntriesCMD string = "umask 177\ncat > wallet.passwd < /dev/null; do sleep 0.5; done; fi " -const AlterSgaPgaCpuCMD string = "echo -e \"alter system set sga_target=%dM scope=both; \n alter system set pga_aggregate_target=%dM scope=both; \n alter system set cpu_count=%d; \" | %s " +const InitPrebuiltDbCMD string = "if [ ! 
-d /mnt/oradata/${ORACLE_SID} -a -d $ORACLE_BASE/oradata/${ORACLE_SID} ]; then cp -v $ORACLE_BASE/oradata/.${ORACLE_SID}$CHECKPOINT_FILE_EXTN /mnt/oradata && " + + " cp -vr $ORACLE_BASE/oradata/${ORACLE_SID} /mnt/oradata && cp -vr $ORACLE_BASE/oradata/dbconfig /mnt/oradata; fi " +const AlterSgaPgaCMD string = "echo -e \"alter system set sga_target=%dM scope=both; \n alter system set pga_aggregate_target=%dM scope=both; \" | %s " +const AlterCpuCountCMD string = "echo -e \"alter system set cpu_count=%d; \" | %s" const AlterProcessesCMD string = "echo -e \"alter system set processes=%d scope=spfile; \" | %s && " + CreateChkFileCMD + " && " + "echo -e \"SHUTDOWN IMMEDIATE; \n STARTUP MOUNT; \n ALTER DATABASE OPEN; \n ALTER PLUGGABLE DATABASE ALL OPEN; \n ALTER SYSTEM REGISTER;\" | %s && " + RemoveChkFileCMD -const GetInitParamsSQL string = "echo -e \"select name,display_value from v\\$parameter where name in ('sga_target','pga_aggregate_target','cpu_count','processes') order by name asc;\" | %s" +const GetInitParamsSQL string = "column name format a20;" + + "\ncolumn display_value format a20;" + + "\nset linesize 100 pagesize 50;" + + "\nselect name,display_value from v\\$parameter where name in ('sga_target','pga_aggregate_target','cpu_count','processes') order by name asc;" + +const UnzipApexOnSIDBPod string = "if [ -f /opt/oracle/oradata/apex-latest.zip ]; then unzip -o /opt/oracle/oradata/apex-latest.zip -d /opt/oracle/oradata/${ORACLE_SID^^}; else echo \"apex-latest.zip not found\"; fi;" -const UnzipApex string = "if [ -f /opt/oracle/oradata/apex-latest.zip ]; then unzip -o /opt/oracle/oradata/apex-latest.zip -d /opt/oracle/oradata/${ORACLE_SID^^}; else echo \"apex-latest.zip not found\"; fi;" +const UnzipApexOnORDSPod string = "if [ -f /opt/oracle/ords/config/ords/apex-latest.zip ]; then cd /opt/oracle/ords/config/ords && jar -xf /opt/oracle/ords/config/ords/apex-latest.zip; else echo \"apex-latest.zip not found\"; fi;" const ChownApex string = " chown 
oracle:oinstall /opt/oracle/oradata/${ORACLE_SID^^}/apex;" const InstallApex string = "if [ -f /opt/oracle/oradata/${ORACLE_SID^^}/apex/apexins.sql ]; then ( while true; do sleep 60; echo \"Installing Apex...\" ; done ) & " + " cd /opt/oracle/oradata/${ORACLE_SID^^}/apex && echo -e \"@apexins.sql SYSAUX SYSAUX TEMP /i/\" | %[1]s && kill -9 $!; else echo \"Apex Folder doesn't exist\" ; fi ;" -const IsApexInstalled string = "select 'APEXVERSION:'||version as version FROM DBA_REGISTRY WHERE COMP_ID='APEX';" - -const UninstallApex string = "if [ -f /opt/oracle/oradata/${ORACLE_SID^^}/apex/apxremov.sql ]; then ( while true; do sleep 60; echo \"Uninstalling Apex...\" ; done ) & " + - " cd /opt/oracle/oradata/${ORACLE_SID^^}/apex && echo -e \"@apxremov.sql\" | %[1]s && kill -9 $!; else echo \"Apex Folder doesn't exist\" ; fi ;" +const InstallApexInContainer string = "cd ${APEX_HOME}/${APEX_VER} && echo -e \"@apxsilentins.sql SYSAUX SYSAUX TEMP /i/ %[1]s %[1]s %[1]s %[1]s;\n" + + "@apex_rest_config_core.sql;\n" + + "exec APEX_UTIL.set_workspace(p_workspace => 'INTERNAL');\n" + + "exec APEX_UTIL.EDIT_USER(p_user_id => APEX_UTIL.GET_USER_ID('ADMIN'), p_user_name => 'ADMIN', p_change_password_on_first_use => 'Y');\n" + + "\" | sql -s sys/%[2]s@${ORACLE_HOST}:${ORACLE_PORT}/%[3]s as sysdba;" + +const IsApexInstalled string = "echo -e \"select 'APEXVERSION:'||version as version FROM DBA_REGISTRY WHERE COMP_ID='APEX';\"" + + " | sql -s sys/%[1]s@${ORACLE_HOST}:${ORACLE_PORT}/%[2]s as sysdba;" + +const UninstallApex string = "cd ${APEX_HOME}/${APEX_VER} && echo -e \"@apxremov.sql\n\" | sql -s sys/%[1]s@${ORACLE_HOST}:${ORACLE_PORT}/%[2]s as sysdba;" + +const ConfigureApexRest string = "if [ -f ${APEX_HOME}/${APEX_VER}/apex_rest_config.sql ]; then cd ${ORDS_HOME}/config/apex && " + + "echo -e \"%[1]s\n%[1]s\" | %[2]s ; else echo \"Apex Folder doesn't exist\" ; fi ;" + +const AlterApexUsers string = "\nALTER SESSION SET CONTAINER=%[2]s;" + + "\n ALTER USER APEX_PUBLIC_USER 
IDENTIFIED BY \\\"%[1]s\\\" ACCOUNT UNLOCK; " + + "\n ALTER USER APEX_REST_PUBLIC_USER IDENTIFIED BY \\\"%[1]s\\\" ACCOUNT UNLOCK;" + + "\n ALTER USER APEX_LISTENER IDENTIFIED BY \\\"%[1]s\\\" ACCOUNT UNLOCK;" + + "\nexec APEX_UTIL.set_workspace(p_workspace => 'INTERNAL');" + + "\nexec APEX_UTIL.EDIT_USER(p_user_id => APEX_UTIL.GET_USER_ID('ADMIN'), p_user_name => 'ADMIN', p_web_password => '%[1]s', p_new_password => '%[1]s');\n" + +const CopyApexImages string = " ( while true; do sleep 60; echo \"Copying Apex Images...\" ; done ) & mkdir -p /opt/oracle/oradata/${ORACLE_SID^^}_ORDS/apex/images && " + + " cp -R /opt/oracle/oradata/${ORACLE_SID^^}/apex/images/* /opt/oracle/oradata/${ORACLE_SID^^}_ORDS/apex/images; chown -R oracle:oinstall /opt/oracle/oradata/${ORACLE_SID^^}_ORDS/apex; kill -9 $!;" + +const ApexAdmin string = "BEGIN" + + "\napex_util.set_security_group_id(p_security_group_id => 10); APEX_UTIL.REMOVE_USER(p_user_name => 'ADMIN');" + + "\nCOMMIT;" + + "\nEND;" + + "\n/" + + "\nBEGIN" + + "\nAPEX_UTIL.create_user(p_user_name => 'ADMIN',p_email_address => 'admin@oracle.com',p_web_password => '%[1]s',p_developer_privs => 'ADMIN',p_failed_access_attempts => '5' ," + + " p_allow_app_building_yn => 'Y' ,p_allow_sql_workshop_yn => 'Y' ,p_allow_websheet_dev_yn => 'Y' , p_allow_team_development_yn => 'Y' , p_change_password_on_first_use => 'N' );" + + "apex_util.unlock_account(p_user_name => 'ADMIN'); APEX_UTIL.set_security_group_id( null );" + + "\nCOMMIT;" + + "\nEND;" + + "\n/" + + "\nALTER SESSION SET CONTAINER=%[2]s;" + + "\nBEGIN" + + "\napex_util.set_security_group_id(p_security_group_id => 10); APEX_UTIL.REMOVE_USER(p_user_name => 'ADMIN');" + + "\nCOMMIT;" + + "\nEND;" + + "\n/" + + "\nBEGIN" + + "\nAPEX_UTIL.create_user(p_user_name => 'ADMIN',p_email_address => 'admin@oracle.com',p_web_password => '%[1]s',p_developer_privs => 'ADMIN',p_failed_access_attempts => '5' ," + + " p_allow_app_building_yn => 'Y' ,p_allow_sql_workshop_yn => 'Y' 
,p_allow_websheet_dev_yn => 'Y' , p_allow_team_development_yn => 'Y' , p_change_password_on_first_use => 'N' );" + + "apex_util.unlock_account(p_user_name => 'ADMIN'); APEX_UTIL.set_security_group_id( null );" + + "\nCOMMIT;" + + "\nEND;" + + "\n/" + +// SetApexUsers is used to set Apex Users, pod that runs SetApexUsers is deleted and new ones is created. +const SetApexUsers string = "\numask 177" + + "\necho db.username=APEX_LISTENER > apexlistener" + + "\necho db.password=\"%[1]s\" >> apexlistener" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_al apexlistener" + + "\nrm -f apexlistener" + + "\necho db.username=APEX_REST_PUBLIC_USER > apexRestPublicUser" + + "\necho db.password=\"%[1]s\" >> apexRestPublicUser" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex_rt apexRestPublicUser" + + "\nrm -f apexRestPublicUser" + + "\necho db.username=APEX_PUBLIC_USER > apexPublicUser" + + "\necho db.password=\"%[1]s\" >> apexPublicUser" + + "\n$JAVA_HOME/bin/java -jar $ORDS_HOME/ords.war set-properties --conf apex apexPublicUser" + + "\nrm -f apexPublicUser" + + "\numask 022" + +// Command to enable/disable MongoDB API support in ords pods +const ConfigMongoDb string = "ords config set mongo.enabled %[1]s" + +// Get Sid, Pdbname, Edition for prebuilt db +const GetSidPdbEditionCMD string = "echo $ORACLE_SID,$ORACLE_PDB,$ORACLE_EDITION;" + +// Command to enable TCPS as a formatted string. The parameter would be the port at which TCPS is enabled. +const EnableTcpsCMD string = "$ORACLE_BASE/$CONFIG_TCPS_FILE" + +// Command for TCPS certs renewal to prevent their expiry. 
It is same as the EnableTcpsCMD +const RenewCertsCMD string = EnableTcpsCMD + +// Command to disable TCPS +const DisableTcpsCMD string = "$ORACLE_BASE/$CONFIG_TCPS_FILE disable" + +// Location of tls certs +const TlsCertsLocation string = "/run/secrets/tls_secret" + +// Check Mount in pods +const PodMountsCmd string = "awk '$2 == \"%s\" {print}' /proc/mounts" + +// TCPS clientWallet update command +const ClientWalletUpdate string = "sed -i -e 's/HOST.*$/HOST=%s)/g' -e 's/PORT.*$/PORT=%d)/g' ${ORACLE_BASE}/oradata/clientWallet/${ORACLE_SID}/tnsnames.ora" + +// TCPS clientWallet location +const ClientWalletLocation string = "/opt/oracle/oradata/clientWallet/%s" + +// Service Patch Payloads +// Three port payload: one OEM express, one TCP and one TCPS port +const ThreePortPayload string = "{\"spec\": { \"type\": \"%s\", \"ports\": [{\"name\": \"xmldb\", \"port\": 5500, \"protocol\": \"TCP\"},{%s},{%s}]}}" + +// Two port payload: one OEM express, one TCP/TCPS port +const TwoPortPayload string = "{\"spec\": { \"type\": \"%s\", \"ports\": [{\"name\": \"xmldb\", \"port\": 5500, \"protocol\": \"TCP\"},{%s}]}}" + +// Payload section for listener port +const LsnrPort string = "\"name\": \"listener\", \"protocol\": \"TCP\", \"port\": %d, \"targetPort\": 1521" + +// Payload section for listener node port +const LsnrNodePort string = "\"name\": \"listener\", \"protocol\": \"TCP\", \"port\": 1521, \"nodePort\": %d" + +// Payload section for TCPS port +const TcpsPort string = "\"name\": \"listener-tcps\", \"protocol\": \"TCP\", \"port\": %d, \"targetPort\": 2484" + +// Payload section for TCPS node port +const TcpsNodePort string = "\"name\": \"listener-tcps\", \"protocol\": \"TCP\", \"port\": 2484, \"nodePort\": %d" diff --git a/commons/database/podbuilder.go b/commons/database/podbuilder.go new file mode 100644 index 00000000..c704c4fc --- /dev/null +++ b/commons/database/podbuilder.go @@ -0,0 +1,108 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package commons + +import ( + "k8s.io/apimachinery/pkg/types" + + corev1 "k8s.io/api/core/v1" +) + +type PodBuilder interface { + SetNamespacedName(types.NamespacedName) *PodBuilder + SetLabels(map[string]string) *PodBuilder + SetTerminationGracePeriodSeconds(int64) *PodBuilder + SetNodeSelector(map[string]string) *PodBuilder + SetSecurityContext(corev1.PodSecurityContext) *PodBuilder + SetImagePullSecrets(string) *PodBuilder + AppendContainers(corev1.Container) *PodBuilder + Build() corev1.Pod +} + +type RealPodBuilder struct { + pod corev1.Pod +} + +func (rpb *RealPodBuilder) SetNamespacedName(namespacedName types.NamespacedName) *RealPodBuilder { + rpb.pod.ObjectMeta.Name = namespacedName.Name + rpb.pod.ObjectMeta.Namespace = namespacedName.Namespace + return rpb +} + +func (rpb *RealPodBuilder) SetLabels(labels map[string]string) *RealPodBuilder { + rpb.pod.ObjectMeta.Labels = labels + return rpb +} + +func (rpb *RealPodBuilder) SetTerminationGracePeriodSeconds(terminationGracePeriod int64) *RealPodBuilder { + rpb.pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriod + return rpb +} + +func (rpb *RealPodBuilder) SetNodeSelector(nsRule map[string]string) *RealPodBuilder { + rpb.pod.Spec.NodeSelector = nsRule + return rpb +} + +func (rpb *RealPodBuilder) SetSecurityContext(podSecurityContext corev1.PodSecurityContext) *RealPodBuilder { + rpb.pod.Spec.SecurityContext = &podSecurityContext + return rpb +} + +func (rpb *RealPodBuilder) SetImagePullSecrets(imagePullSecret string) *RealPodBuilder { + rpb.pod.Spec.ImagePullSecrets = []corev1.LocalObjectReference{ + { + Name: imagePullSecret, + }, + } + return rpb +} + +func (rpb *RealPodBuilder) AppendContainers(container corev1.Container) *RealPodBuilder { + rpb.pod.Spec.Containers = append(rpb.pod.Spec.Containers, container) + return rpb +} + +func (rpb *RealPodBuilder) Build() corev1.Pod { + return rpb.pod +} + +func NewRealPodBuilder() *RealPodBuilder { + return &RealPodBuilder{} +} diff --git 
a/commons/database/svcbuilder.go b/commons/database/svcbuilder.go new file mode 100644 index 00000000..8029c8ee --- /dev/null +++ b/commons/database/svcbuilder.go @@ -0,0 +1,99 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package commons + +import ( + corev1 "k8s.io/api/core/v1" +) + +type ServiceBuilder interface { + SetName(string) *ServiceBuilder + SetNamespace(string) *ServiceBuilder + SetLabels(map[string]string) *ServiceBuilder + SetAnnotation(map[string]string) *ServiceBuilder + SetPorts([]corev1.ServicePort) *ServiceBuilder + SetSelector(map[string]string) *ServiceBuilder + SetPublishNotReadyAddresses(bool) *ServiceBuilder + SetServiceType(corev1.ServiceType) *ServiceBuilder + Build() *corev1.Service +} + +type RealServiceBuilder struct { + service corev1.Service +} + +func (rsb *RealServiceBuilder) SetName(name string) *RealServiceBuilder { + rsb.service.ObjectMeta.Name = name + return rsb +} +func (rsb *RealServiceBuilder) SetNamespace(namespace string) *RealServiceBuilder { + rsb.service.ObjectMeta.Namespace = namespace + return rsb +} +func (rsb *RealServiceBuilder) SetLabels(labels map[string]string) *RealServiceBuilder { + rsb.service.ObjectMeta.Labels = labels + return rsb +} +func (rsb *RealServiceBuilder) SetAnnotation(annotations map[string]string) *RealServiceBuilder { + rsb.service.ObjectMeta.Annotations = annotations + return rsb +} +func (rsb *RealServiceBuilder) SetPorts(ports []corev1.ServicePort) *RealServiceBuilder { + rsb.service.Spec.Ports = ports + return rsb +} +func (rsb *RealServiceBuilder) SetSelector(selector map[string]string) *RealServiceBuilder { + rsb.service.Spec.Selector = selector + return rsb +} +func (rsb *RealServiceBuilder) SetPublishNotReadyAddresses(flag bool) *RealServiceBuilder { + rsb.service.Spec.PublishNotReadyAddresses = flag + return rsb +} +func (rsb *RealServiceBuilder) SetType(serviceType corev1.ServiceType) *RealServiceBuilder { + 
rsb.service.Spec.Type = serviceType + return rsb +} +func (rsb *RealServiceBuilder) Build() corev1.Service { + return rsb.service +} + +func NewRealServiceBuilder() *RealServiceBuilder { + return &RealServiceBuilder{} +} diff --git a/commons/database/utils.go b/commons/database/utils.go index 4e1a143d..e0536642 100644 --- a/commons/database/utils.go +++ b/commons/database/utils.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -45,10 +45,14 @@ import ( "errors" "fmt" "math/rand" + "os" + "strconv" "strings" "time" + "unicode" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" @@ -68,6 +72,8 @@ import ( var requeueY ctrl.Result = ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second} var requeueN ctrl.Result = ctrl.Result{} +var ErrNoReadyPod = errors.New("SingleInstanceDatabase has no ready pod currently") + // Filter events that trigger reconcilation func ResourceEventHandler() predicate.Predicate { return predicate.Funcs{ @@ -98,6 +104,7 @@ func ResourceEventHandler() predicate.Predicate { if oldStatus != newStatus { return true } + } // Ignore updates to CR status in which case metadata.Generation does not change // Reconcile if object Deletion Timestamp Set @@ -244,9 +251,7 @@ func ExecCommand(r client.Reader, config *rest.Config, podName string, namespace pod := &corev1.Pod{} err := r.Get(ctx, types.NamespacedName{Name: podName, Namespace: namespace}, pod) if err != nil { - return "", fmt.Errorf("could not get pod info: %v", err) - } else { - log.Info("Pod Found", "Name : ", podName) + return "", fmt.Errorf("could not find pod to execute command: %v", err) } client, err := kubernetes.NewForConfig(config) if err != nil { @@ -273,7 +278,7 @@ func ExecCommand(r client.Reader, config *rest.Config, podName string, 
namespace Tty: false, }) if err != nil { - return "", fmt.Errorf("could not execute: %v", err) + return "", err } if execErr.Len() > 0 { return "", fmt.Errorf("stderr: %v", execErr.String()) @@ -294,13 +299,14 @@ func GenerateRandomString(n int) string { // retuns Ready Pod,No of replicas ( Only running and Pending Pods) ,available pods , Total No of Pods of a particular CRD func FindPods(r client.Reader, version string, image string, name string, namespace string, ctx context.Context, - req ctrl.Request) (corev1.Pod, int, []corev1.Pod, int, error) { + req ctrl.Request) (corev1.Pod, int, []corev1.Pod, []corev1.Pod, error) { log := ctrllog.FromContext(ctx).WithValues("FindPods", req.NamespacedName) // "available" stores list of pods which can be deleted while scaling down i.e the pods other than one of Ready Pods // There are multiple ready pods possible in OracleRestDataService , while others have atmost one readyPod var available []corev1.Pod + var podsMarkedToBeDeleted []corev1.Pod var readyPod corev1.Pod // To Store the Ready Pod ( Pod that Passed Readiness Probe . Will be shown as 1/1 Running ) podList := &corev1.PodList{} @@ -309,18 +315,17 @@ func FindPods(r client.Reader, version string, image string, name string, namesp // List retrieves list of objects for a given namespace and list options. if err := r.List(ctx, podList, listOpts...); err != nil { log.Error(err, "Failed to list pods of "+name, "Namespace", namespace, "Name", name) - return readyPod, 0, available, 0, err + return readyPod, 0, available, podsMarkedToBeDeleted, err } // r.List() lists all the pods in running, pending,terminating stage matching listOpts . 
so filter them // Fetch the Running and Pending Pods - podsMarkedToBeDeleted := 0 for _, pod := range podList.Items { // Return pods having Image = image (or) if image = ""(Needed in case when called findpods with "" image) if pod.Spec.Containers[0].Image == image || image == "" { if pod.ObjectMeta.DeletionTimestamp != nil { - podsMarkedToBeDeleted += 1 + podsMarkedToBeDeleted = append(podsMarkedToBeDeleted, pod) continue } if pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodPending { @@ -364,7 +369,7 @@ func CheckDBConfig(readyPod corev1.Pod, r client.Reader, config *rest.Config, } else { out, err := ExecCommand(r, config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", CheckModesSQL, GetSqlClient(edition))) + fmt.Sprintf("echo -e \"%s\" | %s", CheckModesSQL, SQLPlusCLI)) if err != nil { log.Error(err, "Error in ExecCommand()") return false, false, false, requeueY @@ -399,6 +404,74 @@ func CheckDBConfig(readyPod corev1.Pod, r client.Reader, config *rest.Config, return flashBackStatus, archiveLogStatus, forceLoggingStatus, requeueN } +func CheckDBInitParams(sidbReadyPod corev1.Pod, r client.Reader, config *rest.Config, + ctx context.Context, req ctrl.Request) (int, int, int, int, error) { + log := ctrllog.FromContext(ctx).WithValues("CheckDBParams", req.NamespacedName) + + if sidbReadyPod.Name == "" { + log.Info("No Pod is Ready") + // As No pod is ready now , turn on mode when pod is ready . 
so requeue the request + return -1, -1, -1, -1, fmt.Errorf("no pod is ready") + } + + log.Info("Check database init params") + + out, err := ExecCommand(r, config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf("echo -e \"%s\" | sqlplus -s / as sysdba", GetInitParamsSQL)) + if err != nil { + log.Error(err, err.Error()) + return -1, -1, -1, -1, err + } + if strings.Contains(out, "no rows selected") { + return -1, -1, -1, -1, errors.New("cannot fetch values for database init params") + } + if strings.Contains(out, "ORA-") { + return -1, -1, -1, -1, fmt.Errorf("error while getting database init params\n%s", out) + } + log.Info(fmt.Sprintf("Database initParams are \n%s", out)) + initParams := strings.Split(out, "\n") + initParams = initParams[3:] + log.Info(fmt.Sprintf("%v", initParams)) + log.Info(fmt.Sprintf("length of initParams is %v", len(initParams))) + log.Info("After parsing init param are " + strings.Join(initParams, ",")) + + log.Info("Parsing cpuCount") + log.Info(strings.Fields(initParams[0])[1]) + cpu_count, err := strconv.Atoi(strings.Fields(initParams[0])[1]) + if err != nil { + return -1, -1, -1, -1, err + } + log.Info("After parsing cpuCount", "cpuCount", cpu_count) + + log.Info("Parsing pga_aggregate_target_value") + log.Info(strings.Fields(initParams[1])[1]) + pga_aggregate_target_value := strings.Fields(initParams[1])[1] + pga_aggregate_target, err := strconv.Atoi(pga_aggregate_target_value[0 : len(pga_aggregate_target_value)-1]) + if err != nil { + return -1, -1, -1, -1, err + } + log.Info("After parsing pga_aggregate_target_value", "pga_aggregate_target_value", pga_aggregate_target) + + log.Info("Parsing processes") + log.Info(strings.Fields(initParams[2])[1]) + processes, err := strconv.Atoi(strings.Fields(initParams[2])[1]) + if err != nil { + return -1, -1, -1, -1, err + } + log.Info("After parsing processes", "processes", processes) + + log.Info("parsing sga_target_value") + 
log.Info(strings.Fields(initParams[3])[1]) + sga_target_value := strings.Fields(initParams[3])[1] + sga_target, err := strconv.Atoi(sga_target_value[0 : len(sga_target_value)-1]) + if err != nil { + return -1, -1, -1, -1, err + } + log.Info("After parsing sgaTarget", "sgaTarget", sga_target) + + return cpu_count, pga_aggregate_target, processes, sga_target, nil +} + // CHECKS IF SID IN DATABASES SLICE , AND ITS DGROLE func IsDatabaseFound(sid string, databases []string, dgrole string) (bool, bool) { found := false @@ -429,66 +502,43 @@ func GetPrimaryDatabase(databases []string) string { return primary } -// Returns the databases in DG config . -func GetDatabasesInDgConfig(readyPod corev1.Pod, r client.Reader, - config *rest.Config, ctx context.Context, req ctrl.Request) ([]string, string, error) { - log := ctrllog.FromContext(ctx).WithValues("GetDatabasesInDgConfig", req.NamespacedName) - - // ## FIND DATABASES PRESENT IN DG CONFIGURATION - out, err := ExecCommand(r, config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | sqlplus -s / as sysdba ", DataguardBrokerGetDatabaseCMD)) - if err != nil { - return []string{}, "", err - } - log.Info("GetDatabasesInDgConfig Output") - log.Info(out) - - if !strings.Contains(out, "no rows selected") && !strings.Contains(out, "ORA-") { - out1 := strings.Replace(out, " ", "_", -1) - // filtering output and storing databses in dg configuration in "databases" slice - databases := strings.Fields(out1) - - // first 2 values in the slice will be column name(DATABASES) and a seperator(--------------) . 
so take the slice from position [2:] - databases = databases[2:] - return databases, out, nil - } - return []string{}, out, errors.New("databases in DG config is nil") -} - // Returns Database version func GetDatabaseVersion(readyPod corev1.Pod, r client.Reader, - config *rest.Config, ctx context.Context, req ctrl.Request, edition string) (string, string, error) { + config *rest.Config, ctx context.Context, req ctrl.Request) (string, error) { + log := ctrllog.FromContext(ctx).WithValues("GetDatabaseVersion", req.NamespacedName) // ## FIND DATABASES PRESENT IN DG CONFIGURATION out, err := ExecCommand(r, config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", GetVersionSQL, GetSqlClient(edition))) + fmt.Sprintf("echo -e \"%s\" | %s", GetVersionSQL, SQLPlusCLI)) if err != nil { - return "", "", err + return "", err } log.Info("GetDatabaseVersion Output") log.Info(out) - - if !strings.Contains(out, "no rows selected") && !strings.Contains(out, "ORA-") { - out1 := strings.Replace(out, " ", "_", -1) - // filtering output and storing databses in dg configuration in "databases" slice - out2 := strings.Fields(out1) - - // first 2 values in the slice will be column name(VERSION) and a seperator(--------------) . so the version would be out2[2] - version := out2[2] - return version, out, nil + if strings.Contains(out, "no rows selected") { + return "", errors.New("cannot fetch database version") + } + if strings.Contains(out, "ORA-") { + return "", errors.New("error while trying to get the database version " + out) } - return "", out, errors.New("database version is nil") + out1 := strings.Replace(out, " ", "_", -1) + // filtering output and storing databses in dg configuration in "databases" slice + out2 := strings.Fields(out1) + // first 2 values in the slice will be column name(VERSION) and a seperator(--------------) . 
so the version would be out2[2] + version := out2[2] + return version, nil } // Fetch role by quering the DB func GetDatabaseRole(readyPod corev1.Pod, r client.Reader, - config *rest.Config, ctx context.Context, req ctrl.Request, edition string) (string, error) { + config *rest.Config, ctx context.Context, req ctrl.Request) (string, error) { + log := ctrllog.FromContext(ctx).WithValues("GetDatabaseRole", req.NamespacedName) out, err := ExecCommand(r, config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", GetDatabaseRoleCMD, GetSqlClient(edition))) + fmt.Sprintf("echo -e \"%s\" | %s", GetDatabaseRoleCMD, SQLPlusCLI)) if err != nil { return "", err } @@ -496,7 +546,7 @@ func GetDatabaseRole(readyPod corev1.Pod, r client.Reader, if !strings.Contains(out, "no rows selected") && !strings.Contains(out, "ORA-") { out = strings.Replace(out, " ", "_", -1) // filtering output and storing databse_role in "database_role" - databaseRole := strings.Fields(out)[2] + databaseRole := strings.ToUpper(strings.Fields(out)[2]) // first 2 values in the slice will be column name(DATABASE_ROLE) and a seperator(--------------) . 
return databaseRole, nil @@ -504,6 +554,26 @@ func GetDatabaseRole(readyPod corev1.Pod, r client.Reader, return "", errors.New("database role is nil") } +func GetDatabaseOpenMode(readyPod corev1.Pod, r client.Reader, + config *rest.Config, ctx context.Context, req ctrl.Request, edition string) (string, error) { + log := ctrllog.FromContext(ctx).WithValues("GetDatabaseOpenMode", req.NamespacedName) + + out, err := ExecCommand(r, config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", GetDBOpenMode, SQLPlusCLI)) + if err != nil { + return "", err + } + log.Info(out) + if !strings.Contains(out, "no rows selected") && !strings.Contains(out, "ORA-") { + out1 := strings.Replace(out, " ", "_", -1) + // filtering output and storing databse_role in "database_role" + databaseOpenMode := strings.Fields(out1)[2] + // first 2 values in the slice will be column name(DATABASE_ROLE) and a seperator(--------------) . + return databaseOpenMode, nil + } + return "", errors.New("database open mode is nil") +} + // Returns true if any of the pod in 'pods' is with pod.Status.Phase == phase func IsAnyPodWithStatus(pods []corev1.Pod, phase corev1.PodPhase) (bool, corev1.Pod) { anyPodWithPhase := false @@ -539,38 +609,61 @@ func GetNodeIp(r client.Reader, ctx context.Context, req ctrl.Request) string { log := ctrllog.FromContext(ctx).WithValues("GetNodeIp", req.NamespacedName) - readyPod, _, available, _, err := FindPods(r, "", "", req.Name, req.Namespace, ctx, req) + //new workflow + nl := &corev1.NodeList{} + err := r.List(ctx, nl) + nodeip := "" if err != nil { log.Error(err, err.Error()) - return "" + return nodeip } - if readyPod.Name != "" { - available = append(available, readyPod) - } - nodeip := "" - for _, pod := range available { - if nodeip == "" { - nodeip = pod.Status.HostIP - } - if pod.Status.HostIP < nodeip { - nodeip = pod.Status.HostIP + + for _, address := range nl.Items[0].Status.Addresses { + if address.Type 
== "ExternalIP" { + nodeip = address.Address + break } } - - node := &corev1.Node{} - err = r.Get(ctx, types.NamespacedName{Name: nodeip, Namespace: req.Namespace}, node) - - if err == nil { - for _, address := range node.Status.Addresses { - if address.Type == "ExternalIP" { + if nodeip == "" { + for _, address := range nl.Items[0].Status.Addresses { + if address.Type == "InternalIP" { nodeip = address.Address + break } } } + log.Info("Node IP obtained ! ", "nodeip: ", nodeip) + return nodeip } +// GetSidPdbEdition to display sid, pdbname, edition in ConnectionString +func GetSidPdbEdition(r client.Reader, config *rest.Config, ctx context.Context, req ctrl.Request) (string, string, string, error) { + + log := ctrllog.FromContext(ctx).WithValues("GetSidbPdbEdition", req.NamespacedName) + + readyPod, _, _, _, err := FindPods(r, "", "", req.Name, req.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return "", "", "", fmt.Errorf("error while fetching ready pod %s : \n %s", readyPod.Name, err.Error()) + } + if readyPod.Name != "" { + out, err := ExecCommand(r, config, readyPod.Name, readyPod.Namespace, "", + ctx, req, false, "bash", "-c", GetSidPdbEditionCMD) + if err != nil { + log.Error(err, err.Error()) + return "", "", "", err + } + log.Info("GetSidPdbEditionCMD output \n" + out) + splitstr := strings.Split((strings.TrimSpace(out)), ",") + return splitstr[0], splitstr[1], splitstr[2], nil + } + // err = errors.New("ready pod name is nil") + log.Error(err, ErrNoReadyPod.Error()) + return "", "", "", ErrNoReadyPod +} + // Get Datapatch Status func GetSqlpatchStatus(r client.Reader, config *rest.Config, readyPod corev1.Pod, ctx context.Context, req ctrl.Request) (string, string, string, error) { log := ctrllog.FromContext(ctx).WithValues("getSqlpatchStatus", req.NamespacedName) @@ -611,9 +704,78 @@ func GetSqlpatchStatus(r client.Reader, config *rest.Config, readyPod corev1.Pod return sqlpatchStatuses[0], splitstr[0], splitstr[1], nil } +// Is 
Source Database On same Cluster +func IsSourceDatabaseOnCluster(cloneFrom string) bool { + if strings.Contains(cloneFrom, ":") && strings.Contains(cloneFrom, "/") { + return false + } + return true +} + +// Apex password validation function +func ApexPasswordValidator(pwd string) bool { + var ( + hasMinLen = false + hasUpper = false + hasLower = false + hasNumber = false + hasSpecial = false + ) + if len(pwd) > 7 { + hasMinLen = true + } + + for _, c := range pwd { + switch { + case unicode.IsUpper(c): + hasUpper = true + case unicode.IsLower(c): + hasLower = true + case unicode.IsNumber(c): + hasNumber = true + case unicode.IsPunct(c): + hasSpecial = true + } + } + + return hasMinLen && hasUpper && hasLower && hasNumber && hasSpecial +} + func GetSqlClient(edition string) string { if edition == "express" { return "su -p oracle -c \"sqlplus -s / as sysdba\"" } return "sqlplus -s / as sysdba" } + +// Function for patching the K8s service with the payload. +// Patch strategy used: Strategic Merge Patch +func PatchService(config *rest.Config, namespace string, ctx context.Context, req ctrl.Request, svcName string, payload string) error { + log := ctrllog.FromContext(ctx).WithValues("patchService", req.NamespacedName) + client, err := kubernetes.NewForConfig(config) + if err != nil { + log.Error(err, "config error") + } + + // Trying to patch the service resource using Strategic Merge strategy + log.Info("Patching the service", "Service", svcName) + _, err = client.CoreV1().Services(namespace).Patch(ctx, svcName, types.MergePatchType, []byte(payload), metav1.PatchOptions{}) + return err +} + +func GetWatchNamespaces() map[string]bool { + // Fetching the allowed namespaces from env variables + var watchNamespaceEnvVar = "WATCH_NAMESPACE" + ns, _ := os.LookupEnv(watchNamespaceEnvVar) + ns = strings.TrimSpace(ns) + namespaces := make(map[string]bool) + if len(ns) == 0 { + return namespaces + } + namespacesArr := strings.Split(ns, ",") + // put slice values into map + for 
_, s := range namespacesArr { + namespaces[s] = true + } + return namespaces +} diff --git a/commons/dbcssystem/dbcs_reconciler.go b/commons/dbcssystem/dbcs_reconciler.go new file mode 100644 index 00000000..6c498320 --- /dev/null +++ b/commons/dbcssystem/dbcs_reconciler.go @@ -0,0 +1,1567 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package common + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "github.com/go-logr/logr" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/core" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/oracle/oracle-database-operator/commons/annotations" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + checkInterval = 30 * time.Second + timeout = 15 * time.Minute +) + +func CreateAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient, kmsDetails *databasev4.KMSDetailsStatus) (string, error) { + + ctx := context.TODO() + // Check if DBCS system already exists using the displayName + listDbcsRequest := database.ListDbSystemsRequest{ + CompartmentId: common.String(dbcs.Spec.DbSystem.CompartmentId), + DisplayName: common.String(dbcs.Spec.DbSystem.DisplayName), + } + + listDbcsResponse, err := dbClient.ListDbSystems(ctx, listDbcsRequest) + if err != nil { + return "", err + } + + // Check if any DBCS system matches the display name + if len(listDbcsResponse.Items) > 0 { + for _, dbcsItem := range listDbcsResponse.Items { + if dbcsItem.DisplayName != nil && *dbcsItem.DisplayName == dbcs.Spec.DbSystem.DisplayName { + logger.Info("DBCS system already exists", "DBCS ID", *dbcsItem.Id) + 
return *dbcsItem.Id, nil + } + } + } + + // Get the admin password from OCI key + sshPublicKeys, err := getPublicSSHKey(kubeClient, dbcs) + if err != nil { + return "", err + } + + // Get DB SystemOptions + dbSystemReq := GetDBSystemopts(dbcs) + licenceModel := getLicenceModel(dbcs) + + // Get DB Home Details + dbHomeReq, err := GetDbHomeDetails(kubeClient, dbClient, dbcs) + if err != nil { + return "", err + } + + // Determine CpuCoreCount + cpuCoreCount := 2 // default value + if dbcs.Spec.DbSystem.CpuCoreCount > 0 { + cpuCoreCount = dbcs.Spec.DbSystem.CpuCoreCount + } + + // Set up DB system details + dbcsDetails := database.LaunchDbSystemDetails{ + AvailabilityDomain: common.String(dbcs.Spec.DbSystem.AvailabilityDomain), + CompartmentId: common.String(dbcs.Spec.DbSystem.CompartmentId), + SubnetId: common.String(dbcs.Spec.DbSystem.SubnetId), + Shape: common.String(dbcs.Spec.DbSystem.Shape), + Domain: common.String(dbcs.Spec.DbSystem.Domain), + DisplayName: common.String(dbcs.Spec.DbSystem.DisplayName), + SshPublicKeys: []string{sshPublicKeys}, + Hostname: common.String(dbcs.Spec.DbSystem.HostName), + CpuCoreCount: common.Int(cpuCoreCount), + NodeCount: common.Int(GetNodeCount(dbcs)), + InitialDataStorageSizeInGB: common.Int(GetInitialStorage(dbcs)), + DbSystemOptions: &dbSystemReq, + DbHome: &dbHomeReq, + DatabaseEdition: GetDBEdition(dbcs), + DiskRedundancy: GetDBbDiskRedundancy(dbcs), + LicenseModel: database.LaunchDbSystemDetailsLicenseModelEnum(licenceModel), + } + + if len(dbcs.Spec.DbSystem.Tags) != 0 { + dbcsDetails.FreeformTags = dbcs.Spec.DbSystem.Tags + } + + // Add KMS details if available + if kmsDetails != nil && kmsDetails.VaultId != "" { + dbcsDetails.KmsKeyId = common.String(kmsDetails.KeyId) + dbcsDetails.DbHome.Database.KmsKeyId = common.String(kmsDetails.KeyId) + dbcsDetails.DbHome.Database.VaultId = common.String(kmsDetails.VaultId) + } + + // Log dbcsDetails for debugging + logger.Info("Launching DB System with details", "dbcsDetails", 
dbcsDetails) + + req := database.LaunchDbSystemRequest{LaunchDbSystemDetails: dbcsDetails} + + // Send the request using the service client + resp, err := dbClient.LaunchDbSystem(ctx, req) + if err != nil { + return " ", err + } + + dbcs.Spec.Id = resp.DbSystem.Id + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { + return "", statusErr + } + + // Check the State + _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + return "", err + } + + return *resp.DbSystem.Id, nil +} + +func parseLicenseModel(licenseModelStr string) (database.DbSystemLicenseModelEnum, error) { + switch licenseModelStr { + case "LICENSE_INCLUDED": + return database.DbSystemLicenseModelLicenseIncluded, nil + case "BRING_YOUR_OWN_LICENSE": + return database.DbSystemLicenseModelBringYourOwnLicense, nil + default: + return "", fmt.Errorf("invalid license model: %s", licenseModelStr) + } +} +func convertLicenseModel(licenseModel database.DbSystemLicenseModelEnum) (database.LaunchDbSystemFromDbSystemDetailsLicenseModelEnum, error) { + switch licenseModel { + case database.DbSystemLicenseModelLicenseIncluded: + return database.LaunchDbSystemFromDbSystemDetailsLicenseModelLicenseIncluded, nil + case database.DbSystemLicenseModelBringYourOwnLicense: + return database.LaunchDbSystemFromDbSystemDetailsLicenseModelBringYourOwnLicense, nil + default: + return "", fmt.Errorf("unsupported license model: %s", licenseModel) + } +} + +func CloneAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) (string, error) { + ctx := context.TODO() + var err error + dbAdminPassword := "" + // tdePassword := "" + logger.Info("Starting the clone process for DBCS", "dbcs", 
dbcs) + // Get the admin password from Kubernetes secret + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetCloningAdminPassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get DB Admin password") + } + // logger.Info(dbAdminPassword) + } + // // Log retrieved passwords + logger.Info("Retrieved passwords from Kubernetes secrets") + + // // // Retrieve the TDE wallet password from Kubernetes secrets + // // tdePassword, err := GetTdePassword(kubeClient, dbcs.Namespace, dbcs.Spec.TdeWalletPasswordSecretName) + // // if err != nil { + // // logger.Error(err, "Failed to get TDE wallet password from Kubernetes secret", "namespace", dbcs.Namespace, "secretName", dbcs.Spec.TdeWalletPasswordSecretName) + // // return "", err + // // } + sshPublicKeys, err := getCloningPublicSSHKey(kubeClient, dbcs) + if err != nil { + logger.Error(err, "failed to get SSH public key") + } + + // Fetch the existing DB system details + existingDbSystem, err := dbClient.GetDbSystem(ctx, database.GetDbSystemRequest{ + DbSystemId: dbcs.Spec.Id, + }) + if err != nil { + return "", err + } + logger.Info("Retrieved existing Db System Details from OCI using Spec.Id") + + // // Create the clone request payload + // // Create the DbHome details + // Prepare CreateDatabaseFromDbSystemDetails + databaseDetails := &database.CreateDatabaseFromDbSystemDetails{ + AdminPassword: &dbAdminPassword, + DbName: &dbcs.Spec.DbClone.DbName, + DbDomain: existingDbSystem.DbSystem.Domain, + DbUniqueName: &dbcs.Spec.DbClone.DbUniqueName, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + } + licenseModelEnum, err := parseLicenseModel(dbcs.Spec.DbClone.LicenseModel) + if err != nil { + return "", err + } + launchLicenseModel, err := convertLicenseModel(licenseModelEnum) + if err != nil { + return "", err + } + + cloneRequest := database.LaunchDbSystemFromDbSystemDetails{ + CompartmentId: 
existingDbSystem.DbSystem.CompartmentId, + AvailabilityDomain: existingDbSystem.DbSystem.AvailabilityDomain, + SubnetId: &dbcs.Spec.DbClone.SubnetId, + Shape: existingDbSystem.DbSystem.Shape, + SshPublicKeys: []string{sshPublicKeys}, + Hostname: &dbcs.Spec.DbClone.HostName, + CpuCoreCount: existingDbSystem.DbSystem.CpuCoreCount, + SourceDbSystemId: existingDbSystem.DbSystem.Id, + DbHome: &database.CreateDbHomeFromDbSystemDetails{ + Database: databaseDetails, + DisplayName: existingDbSystem.DbSystem.DisplayName, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + }, + FaultDomains: existingDbSystem.DbSystem.FaultDomains, + DisplayName: &dbcs.Spec.DbClone.DisplayName, + BackupSubnetId: existingDbSystem.DbSystem.BackupSubnetId, + NsgIds: existingDbSystem.DbSystem.NsgIds, + BackupNetworkNsgIds: existingDbSystem.DbSystem.BackupNetworkNsgIds, + TimeZone: existingDbSystem.DbSystem.TimeZone, + DbSystemOptions: existingDbSystem.DbSystem.DbSystemOptions, + SparseDiskgroup: existingDbSystem.DbSystem.SparseDiskgroup, + Domain: &dbcs.Spec.DbClone.Domain, + ClusterName: existingDbSystem.DbSystem.ClusterName, + DataStoragePercentage: existingDbSystem.DbSystem.DataStoragePercentage, + // KmsKeyId: existingDbSystem.DbSystem.KmsKeyId, + // KmsKeyVersionId: existingDbSystem.DbSystem.KmsKeyVersionId, + NodeCount: existingDbSystem.DbSystem.NodeCount, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + DataCollectionOptions: existingDbSystem.DbSystem.DataCollectionOptions, + LicenseModel: launchLicenseModel, + } + + // Execute the clone request + response, err := dbClient.LaunchDbSystem(ctx, database.LaunchDbSystemRequest{ + LaunchDbSystemDetails: cloneRequest, + }) + if err != nil { + return "", err + } + + dbcs.Status.DbCloneStatus.Id = response.DbSystem.Id + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, 
databasev4.Provision, nwClient, wrClient); statusErr != nil { + return "", statusErr + } + + // Check the state + _, err = CheckResourceState(logger, dbClient, *response.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + return "", err + } + + return *response.DbSystem.Id, nil + // return "", nil +} + +// CloneFromBackupAndGetDbcsId clones a DB system from a backup and returns the new DB system's OCID. +func CloneFromBackupAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) (string, error) { + ctx := context.TODO() + + var err error + var dbAdminPassword string + var tdePassword string + logger.Info("Starting the clone process for DBCS from backup", "dbcs", dbcs) + backupResp, err := dbClient.GetBackup(ctx, database.GetBackupRequest{ + BackupId: dbcs.Spec.DbBackupId, + }) + + if err != nil { + fmt.Println("Error getting backup details:", err) + return "", err + } + databaseId := backupResp.Backup.DatabaseId + // Fetch the existing Database details + existingDatabase, err := dbClient.GetDatabase(ctx, database.GetDatabaseRequest{ + DatabaseId: databaseId, + }) + if err != nil { + logger.Error(err, "Failed to retrieve existing Database details") + return "", err + } + // Check if DbSystemId is available + dbSystemId := existingDatabase.DbSystemId + if dbSystemId == nil { + // handle the case where DbSystemId is not available + logger.Error(err, "DBSystemId not found") + return "", err + } + + // Fetch the existing DB system details + existingDbSystem, err := dbClient.GetDbSystem(ctx, database.GetDbSystemRequest{ + DbSystemId: dbSystemId, + }) + if err != nil { + return "", err + } + // Get the admin password from Kubernetes secret + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetCloningAdminPassword(kubeClient, dbcs) + if err != nil { + 
logger.Error(err, "Failed to get DB Admin password") + } + // logger.Info(dbAdminPassword) + } + // // // Retrieve the TDE wallet password from Kubernetes secrets to open backup DB using TDE Wallet + if dbcs.Spec.DbClone.TdeWalletPasswordSecret != "" { + tdePassword, err = GetCloningTdePassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get TDE wallet password from Kubernetes secret") + return "", err + } + } + + sshPublicKeys, err := getCloningPublicSSHKey(kubeClient, dbcs) + if err != nil { + logger.Error(err, "failed to get SSH public key") + return "", err + } + + // Create the clone request payload + cloneRequest := database.LaunchDbSystemFromBackupDetails{ + CompartmentId: existingDbSystem.DbSystem.CompartmentId, + AvailabilityDomain: existingDbSystem.DbSystem.AvailabilityDomain, + SubnetId: &dbcs.Spec.DbClone.SubnetId, + Shape: existingDbSystem.DbSystem.Shape, + SshPublicKeys: []string{sshPublicKeys}, + Hostname: &dbcs.Spec.DbClone.HostName, + CpuCoreCount: existingDbSystem.DbSystem.CpuCoreCount, + DbHome: &database.CreateDbHomeFromBackupDetails{ + Database: &database.CreateDatabaseFromBackupDetails{ // Corrected type here + BackupId: dbcs.Spec.DbBackupId, + AdminPassword: &dbAdminPassword, + BackupTDEPassword: &tdePassword, + DbName: &dbcs.Spec.DbClone.DbName, + // DbDomain: existingDbSystem.DbSystem.Domain, + DbUniqueName: &dbcs.Spec.DbClone.DbUniqueName, + // FreeformTags: existingDbSystem.DbSystem.FreeformTags, + // DefinedTags: existingDbSystem.DbSystem.DefinedTags, + SidPrefix: &dbcs.Spec.DbClone.SidPrefix, + }, + DisplayName: existingDbSystem.DbSystem.DisplayName, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + }, + FaultDomains: existingDbSystem.DbSystem.FaultDomains, + DisplayName: &dbcs.Spec.DbClone.DisplayName, + BackupSubnetId: existingDbSystem.DbSystem.BackupSubnetId, + NsgIds: existingDbSystem.DbSystem.NsgIds, + BackupNetworkNsgIds: 
existingDbSystem.DbSystem.BackupNetworkNsgIds, + TimeZone: existingDbSystem.DbSystem.TimeZone, + DbSystemOptions: existingDbSystem.DbSystem.DbSystemOptions, + SparseDiskgroup: existingDbSystem.DbSystem.SparseDiskgroup, + Domain: &dbcs.Spec.DbClone.Domain, + ClusterName: existingDbSystem.DbSystem.ClusterName, + DataStoragePercentage: existingDbSystem.DbSystem.DataStoragePercentage, + InitialDataStorageSizeInGB: &dbcs.Spec.DbClone.InitialDataStorageSizeInGB, + KmsKeyId: &dbcs.Spec.DbClone.KmsKeyId, + KmsKeyVersionId: &dbcs.Spec.DbClone.KmsKeyVersionId, + NodeCount: existingDbSystem.DbSystem.NodeCount, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + DataCollectionOptions: existingDbSystem.DbSystem.DataCollectionOptions, + DatabaseEdition: database.LaunchDbSystemFromBackupDetailsDatabaseEditionEnum(existingDbSystem.DbSystem.DatabaseEdition), + LicenseModel: database.LaunchDbSystemFromBackupDetailsLicenseModelEnum(existingDbSystem.DbSystem.LicenseModel), + StorageVolumePerformanceMode: database.LaunchDbSystemBaseStorageVolumePerformanceModeEnum(existingDbSystem.DbSystem.StorageVolumePerformanceMode), + } + + // Execute the clone request + response, err := dbClient.LaunchDbSystem(ctx, database.LaunchDbSystemRequest{ + LaunchDbSystemDetails: cloneRequest, + }) + if err != nil { + return "", err + } + + dbcs.Status.DbCloneStatus.Id = response.DbSystem.Id + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { + return "", statusErr + } + + // Check the state + _, err = CheckResourceState(logger, dbClient, *response.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + return "", err + } + + return *response.DbSystem.Id, nil +} + +// Sync the DbcsSystem Database details +func CloneFromDatabaseAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient 
database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) (string, error) { + ctx := context.TODO() + var err error + dbAdminPassword := "" + tdePassword := "" + logger.Info("Starting the clone process for Database", "dbcs", dbcs) + + // Get the admin password from Kubernetes secret + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetCloningAdminPassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get DB Admin password") + return "", err + } + } + // // // Retrieve the TDE wallet password from Kubernetes secrets to open backup DB using TDE Wallet + if dbcs.Spec.DbClone.TdeWalletPasswordSecret != "" { + tdePassword, err = GetCloningTdePassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get TDE wallet password from Kubernetes secret") + return "", err + } + } + + logger.Info("Retrieved passwords from Kubernetes secrets") + + // Fetch the existing Database details + existingDatabase, err := dbClient.GetDatabase(ctx, database.GetDatabaseRequest{ + DatabaseId: dbcs.Spec.DatabaseId, + }) + if err != nil { + logger.Error(err, "Failed to retrieve existing Database details") + return "", err + } + // Check if DbSystemId is available + dbSystemId := existingDatabase.DbSystemId + if dbSystemId == nil { + // handle the case where DbSystemId is not available + logger.Error(err, "DBSystemId not found") + return "", err + } + + // Fetch the existing DB system details + existingDbSystem, err := dbClient.GetDbSystem(ctx, database.GetDbSystemRequest{ + DbSystemId: dbSystemId, + }) + if err != nil { + return "", err + } + logger.Info("Retrieved existing Database details from OCI", "DatabaseId", dbcs.Spec.DatabaseId) + + // Get SSH public key + sshPublicKeys, err := getCloningPublicSSHKey(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get SSH public key") + return "", err + } + + // Create the clone request payload + 
cloneRequest := database.LaunchDbSystemFromDatabaseDetails{ + CompartmentId: existingDatabase.CompartmentId, + AvailabilityDomain: existingDbSystem.DbSystem.AvailabilityDomain, + SubnetId: existingDbSystem.DbSystem.SubnetId, + Shape: existingDbSystem.DbSystem.Shape, + SshPublicKeys: []string{sshPublicKeys}, + Hostname: &dbcs.Spec.DbClone.HostName, + CpuCoreCount: existingDbSystem.DbSystem.CpuCoreCount, + DatabaseEdition: database.LaunchDbSystemFromDatabaseDetailsDatabaseEditionEnum(existingDbSystem.DbSystem.DatabaseEdition), + DbHome: &database.CreateDbHomeFromDatabaseDetails{ + Database: &database.CreateDatabaseFromAnotherDatabaseDetails{ + // Mandatory fields + DatabaseId: dbcs.Spec.DatabaseId, // Source database ID + // Optionally fill in other fields if needed + DbName: &dbcs.Spec.DbClone.DbName, + AdminPassword: &dbAdminPassword, // Admin password for the new database + // The password to open the TDE wallet. + BackupTDEPassword: &tdePassword, + + DbUniqueName: &dbcs.Spec.DbClone.DbUniqueName, + }, + + // Provide a display name for the new Database Home + DisplayName: existingDbSystem.DbSystem.DisplayName, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + }, + + FaultDomains: existingDbSystem.DbSystem.FaultDomains, + DisplayName: &dbcs.Spec.DbClone.DisplayName, + BackupSubnetId: existingDbSystem.DbSystem.BackupSubnetId, + NsgIds: existingDbSystem.DbSystem.NsgIds, + BackupNetworkNsgIds: existingDbSystem.DbSystem.BackupNetworkNsgIds, + TimeZone: existingDbSystem.DbSystem.TimeZone, + KmsKeyId: &dbcs.Spec.DbClone.KmsKeyId, + KmsKeyVersionId: &dbcs.Spec.DbClone.KmsKeyVersionId, + NodeCount: existingDbSystem.DbSystem.NodeCount, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + // PrivateIp: &dbcs.Spec.DbClone.PrivateIp, + InitialDataStorageSizeInGB: &dbcs.Spec.DbClone.InitialDataStorageSizeInGB, + LicenseModel: 
database.LaunchDbSystemFromDatabaseDetailsLicenseModelEnum(existingDbSystem.DbSystem.LicenseModel), + StorageVolumePerformanceMode: database.LaunchDbSystemBaseStorageVolumePerformanceModeEnum(existingDbSystem.DbSystem.StorageVolumePerformanceMode), + } + + // logger.Info("Launching database clone", "cloneRequest", cloneRequest) + + // Execute the clone request + response, err := dbClient.LaunchDbSystem(ctx, database.LaunchDbSystemRequest{ + LaunchDbSystemDetails: cloneRequest, + }) + if err != nil { + return "", err + } + + dbcs.Status.DbCloneStatus.Id = response.DbSystem.Id + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { + return "", statusErr + } + + // Check the state + _, err = CheckResourceState(logger, dbClient, *response.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + return "", err + } + + return *response.DbSystem.Id, nil +} + +// Get admin password from Secret then OCI valut secret +func GetCloningAdminPassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { + // Get the Admin Secret + adminSecret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), types.NamespacedName{ + Namespace: dbcs.GetNamespace(), + Name: dbcs.Spec.DbClone.DbAdminPasswordSecret, + }, adminSecret) + + if err != nil { + return "", err + } + + // Get the admin password + key := "admin-password" + if val, ok := adminSecret.Data[key]; ok { + return strings.TrimSpace(string(val)), nil + } else { + msg := "secret item not found: admin-password" + return "", errors.New(msg) + } + } + return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + +// Get admin password from Secret then OCI valut secret +func GetAdminPassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if 
dbcs.Spec.DbSystem.DbAdminPasswordSecret != "" { + // Get the Admin Secret + adminSecret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), types.NamespacedName{ + Namespace: dbcs.GetNamespace(), + Name: dbcs.Spec.DbSystem.DbAdminPasswordSecret, + }, adminSecret) + + if err != nil { + return "", err + } + + // Get the admin password + key := "admin-password" + if val, ok := adminSecret.Data[key]; ok { + return strings.TrimSpace(string(val)), nil + } else { + msg := "secret item not found: admin-password" + return "", errors.New(msg) + } + } + return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + +// Get admin password from Secret then OCI valut secret +func GetTdePassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if dbcs.Spec.DbSystem.TdeWalletPasswordSecret != "" { + // Get the Admin Secret + tdeSecret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), types.NamespacedName{ + Namespace: dbcs.GetNamespace(), + Name: dbcs.Spec.DbSystem.TdeWalletPasswordSecret, + }, tdeSecret) + + if err != nil { + return "", err + } + + // Get the admin password + key := "tde-password" + if val, ok := tdeSecret.Data[key]; ok { + return strings.TrimSpace(string(val)), nil + } else { + msg := "secret item not found: tde-password" + return "", errors.New(msg) + } + } + return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + +// Get admin password from Secret then OCI valut secret +func GetCloningTdePassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if dbcs.Spec.DbClone.TdeWalletPasswordSecret != "" { + // Get the Admin Secret + tdeSecret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), types.NamespacedName{ + Namespace: dbcs.GetNamespace(), + Name: dbcs.Spec.DbClone.TdeWalletPasswordSecret, + }, tdeSecret) + + if err != nil { + return "", err + } + + // Get the admin password + key := "tde-password" + if val, ok := 
tdeSecret.Data[key]; ok { +		return strings.TrimSpace(string(val)), nil +	} else { +		msg := "secret item not found: tde-password" +		return "", errors.New(msg) +	} +	} +	return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + +// Get the public SSH key from a Kubernetes Secret +func getPublicSSHKey(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { +	if dbcs.Spec.DbSystem.SshPublicKeys[0] != "" { +	// Get the SSH key Secret +	sshkeysecret := &corev1.Secret{} +	err := kubeClient.Get(context.TODO(), types.NamespacedName{ +		Namespace: dbcs.GetNamespace(), +		Name:      dbcs.Spec.DbSystem.SshPublicKeys[0], +	}, sshkeysecret) + +	if err != nil { +		return "", err +	} + +	// Get the public key +	key := "publickey" +	if val, ok := sshkeysecret.Data[key]; ok { +		return string(val), nil +	} else { +		msg := "secret item not found: " +		return "", errors.New(msg) +	} +	} +	return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + +// Get the public SSH key for cloning from a Kubernetes Secret +func getCloningPublicSSHKey(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { +	if dbcs.Spec.DbClone.SshPublicKeys[0] != "" { +	// Get the SSH key Secret +	sshkeysecret := &corev1.Secret{} +	err := kubeClient.Get(context.TODO(), types.NamespacedName{ +		Namespace: dbcs.GetNamespace(), +		Name:      dbcs.Spec.DbClone.SshPublicKeys[0], +	}, sshkeysecret) + +	if err != nil { +		return "", err +	} + +	// Get the public key +	key := "publickey" +	if val, ok := sshkeysecret.Data[key]; ok { +		return string(val), nil +	} else { +		msg := "secret item not found: " +		return "", errors.New(msg) +	} +	} +	return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + +// Delete DbcsSystem System +func DeleteDbcsSystemSystem(dbClient database.DatabaseClient, Id string) error { + +	dbcsId := Id + +	dbcsReq := database.TerminateDbSystemRequest{ +		DbSystemId: &dbcsId, +	} + +	_, err := 
dbClient.TerminateDbSystem(context.TODO(), dbcsReq) +	if err != nil { +		return err +	} + +	return nil +} + +// SetLifecycleState set status.state of the resource. +func SetLifecycleState(kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, state databasev4.LifecycleState, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { +	maxRetries := 5 +	retryDelay := time.Second * 2 + +	for attempt := 0; attempt < maxRetries; attempt++ { +		// Fetch the latest version of the object +		latestInstance := &databasev4.DbcsSystem{} +		err := kubeClient.Get(context.TODO(), client.ObjectKeyFromObject(dbcs), latestInstance) +		if err != nil { +			// Log and return error if fetching the latest version fails +			return fmt.Errorf("failed to fetch the latest version of DBCS instance: %w", err) +		} + +		// Merge the instance fields into latestInstance +		err = mergeInstancesFromLatest(dbcs, latestInstance) +		if err != nil { +			return fmt.Errorf("failed to merge instances: %w", err) +		} + +		// Set the status using the dbcs object +		if statusErr := SetDBCSStatus(dbClient, dbcs, nwClient, wrClient); statusErr != nil { +			return statusErr +		} + +		// Update the ResourceVersion of dbcs from latestInstance to avoid conflict +		dbcs.ResourceVersion = latestInstance.ResourceVersion + +		// Attempt to patch the status of the instance +		err = kubeClient.Status().Patch(context.TODO(), dbcs, client.MergeFrom(latestInstance)) +		if err != nil { +			if apierrors.IsConflict(err) { +				// Handle the conflict and retry +				time.Sleep(retryDelay) +				continue +			} +			// For other errors, log and return the error +			return fmt.Errorf("failed to update the DBCS instance status: %w", err) +		} + +		// If no error, break the loop +		break +	} + +	return nil +} +func mergeInstancesFromLatest(instance, latestInstance *databasev4.DbcsSystem) error { +	instanceVal := reflect.ValueOf(instance).Elem() +	latestVal := reflect.ValueOf(latestInstance).Elem() + +	// Fields to 
exclude from merging + excludeFields := map[string]bool{ + "ReleaseUpdate": true, + "AsmStorageStatus": true, + } + + // Loop through the fields in instance + for i := 0; i < instanceVal.NumField(); i++ { + field := instanceVal.Type().Field(i) + instanceField := instanceVal.Field(i) + latestField := latestVal.FieldByName(field.Name) + + // Skip unexported fields + if !isExported(field) { + continue + } + + // Ensure latestField is valid + if !latestField.IsValid() || !instanceField.CanSet() { + continue + } + + // Skip fields that are in the exclusion list + if excludeFields[field.Name] { + continue + } + + // Handle pointer fields + if latestField.Kind() == reflect.Ptr { + if !latestField.IsNil() && instanceField.IsNil() { + // If instance's field is nil and latest's field is not nil, set the latest's field value + instanceField.Set(latestField) + } + // If instance's field is not nil, do not overwrite + } else if latestField.Kind() == reflect.String { + if latestField.String() != "" && latestField.String() != "NOT_DEFINED" && instanceField.String() == "" { + // If latest's string field is non-empty and not "NOT_DEFINED", and instance's string field is empty, set the value + instanceField.Set(latestField) + } + } else if latestField.Kind() == reflect.Struct { + // Handle struct types recursively + mergeStructFields(instanceField, latestField) + } else { + // Handle other types if instance's field is zero value + if reflect.DeepEqual(instanceField.Interface(), reflect.Zero(instanceField.Type()).Interface()) { + instanceField.Set(latestField) + } + } + } + return nil +} + +func mergeStructFields(instanceField, latestField reflect.Value) { + for i := 0; i < instanceField.NumField(); i++ { + subField := instanceField.Type().Field(i) + instanceSubField := instanceField.Field(i) + latestSubField := latestField.Field(i) + + if !isExported(subField) || !instanceSubField.CanSet() { + continue + } + + if latestSubField.Kind() == reflect.Ptr { + if !latestSubField.IsNil() && 
instanceSubField.IsNil() { + instanceSubField.Set(latestSubField) + } + } else if latestSubField.Kind() == reflect.String { + if latestSubField.String() != "" && latestSubField.String() != "NOT_DEFINED" && instanceSubField.String() == "" { + instanceSubField.Set(latestSubField) + } + } else if latestSubField.Kind() == reflect.Struct { + mergeStructFields(instanceSubField, latestSubField) + } else { + if reflect.DeepEqual(instanceSubField.Interface(), reflect.Zero(instanceSubField.Type()).Interface()) { + instanceSubField.Set(latestSubField) + } + } + } +} + +func isExported(field reflect.StructField) bool { + return field.PkgPath == "" +} + +// SetDBCSSystem LifeCycle state when state is provisioning + +func SetDBCSDatabaseLifecycleState(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { + + dbcsId := *dbcs.Spec.Id + + dbcsReq := database.GetDbSystemRequest{ + DbSystemId: &dbcsId, + } + + resp, err := dbClient.GetDbSystem(context.TODO(), dbcsReq) + if err != nil { + return err + } + + // Return if the desired lifecycle state is the same as the current lifecycle state + if string(dbcs.Status.State) == string(resp.LifecycleState) { + return nil + } else if string(resp.LifecycleState) == string(databasev4.Available) { + // Change the phase to "Available" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Available, nwClient, wrClient); statusErr != nil { + return statusErr + } + } else if string(resp.LifecycleState) == string(databasev4.Provision) { + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { + return statusErr + } + // Check the State + _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + 
return err + } + } else if string(resp.LifecycleState) == string(databasev4.Update) { + // Change the phase to "Updating" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Update, nwClient, wrClient); statusErr != nil { + return statusErr + } + // Check the State + _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev4.Update), string(databasev4.Available)) + if err != nil { + return err + } + } else if string(resp.LifecycleState) == string(databasev4.Failed) { + // Change the phase to "Updating" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Failed, nwClient, wrClient); statusErr != nil { + return statusErr + } + return fmt.Errorf("DbSystem is in Failed State") + } else if string(resp.LifecycleState) == string(databasev4.Terminated) { + // Change the phase to "Terminated" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Terminate, nwClient, wrClient); statusErr != nil { + return statusErr + } + } + return nil +} + +func GetDbSystemId(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) error { + dbcsId := *dbcs.Spec.Id + + dbcsReq := database.GetDbSystemRequest{ + DbSystemId: &dbcsId, + } + + response, err := dbClient.GetDbSystem(context.TODO(), dbcsReq) + if err != nil { + return err + } + + dbcs.Spec.DbSystem.CompartmentId = *response.CompartmentId + if response.DisplayName != nil { + dbcs.Spec.DbSystem.DisplayName = *response.DisplayName + } + + if response.Hostname != nil { + dbcs.Spec.DbSystem.HostName = *response.Hostname + } + if response.CpuCoreCount != nil { + dbcs.Spec.DbSystem.CpuCoreCount = *response.CpuCoreCount + } + dbcs.Spec.DbSystem.NodeCount = response.NodeCount + if response.ClusterName != nil { + dbcs.Spec.DbSystem.ClusterName = *response.ClusterName + } + //dbcs.Spec.DbSystem.DbUniqueName = *response.DbUniqueName + if string(response.DbSystem.DatabaseEdition) != "" { + dbcs.Spec.DbSystem.DbEdition = 
string(response.DatabaseEdition) + } + if string(response.DiskRedundancy) != "" { + dbcs.Spec.DbSystem.DiskRedundancy = string(response.DiskRedundancy) + } + + //dbcs.Spec.DbSystem.DbVersion = *response. + + if response.BackupSubnetId != nil { + dbcs.Spec.DbSystem.BackupSubnetId = *response.BackupSubnetId + } + dbcs.Spec.DbSystem.Shape = *response.Shape + dbcs.Spec.DbSystem.SshPublicKeys = []string(response.SshPublicKeys) + if response.FaultDomains != nil { + dbcs.Spec.DbSystem.FaultDomains = []string(response.FaultDomains) + } + dbcs.Spec.DbSystem.SubnetId = *response.SubnetId + dbcs.Spec.DbSystem.AvailabilityDomain = *response.AvailabilityDomain + if response.KmsKeyId != nil { + dbcs.Status.KMSDetailsStatus.KeyId = *response.KmsKeyId + } + err = PopulateDBDetails(logger, dbClient, dbcs) + if err != nil { + logger.Info("Error Occurred while collecting the DB details") + return err + } + return nil +} + +func PopulateDBDetails(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) error { + + listDbHomeRsp, err := GetListDbHomeRsp(logger, dbClient, dbcs) + if err != nil { + logger.Info("Error Occurred while getting List of DBHomes") + return err + } + dbHomeId := listDbHomeRsp.Items[0].Id + listDBRsp, err := GetListDatabaseRsp(logger, dbClient, dbcs, *dbHomeId) + if err != nil { + logger.Info("Error Occurred while getting List of Databases") + return err + } + + dbcs.Spec.DbSystem.DbName = *listDBRsp.Items[0].DbName + dbcs.Spec.DbSystem.DbUniqueName = *listDBRsp.Items[0].DbUniqueName + dbcs.Spec.DbSystem.DbVersion = *listDbHomeRsp.Items[0].DbVersion + + return nil +} + +func GetListDbHomeRsp(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) (database.ListDbHomesResponse, error) { + + dbcsId := *dbcs.Spec.Id + CompartmentId := dbcs.Spec.DbSystem.CompartmentId + + dbHomeReq := database.ListDbHomesRequest{ + DbSystemId: &dbcsId, + CompartmentId: &CompartmentId, + } + + response, err := 
dbClient.ListDbHomes(context.TODO(), dbHomeReq) + if err != nil { + return database.ListDbHomesResponse{}, err + } + + return response, nil +} + +func GetListDatabaseRsp(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, dbHomeId string) (database.ListDatabasesResponse, error) { + + CompartmentId := dbcs.Spec.DbSystem.CompartmentId + + dbReq := database.ListDatabasesRequest{ + DbHomeId: &dbHomeId, + CompartmentId: &CompartmentId, + } + + response, err := dbClient.ListDatabases(context.TODO(), dbReq) + if err != nil { + return database.ListDatabasesResponse{}, err + } + + return response, nil +} + +func UpdateDbcsSystemIdInst(log logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, kubeClient client.Client, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient, databaseID string) error { + // log.Info("Existing DB System Getting Updated with new details in UpdateDbcsSystemIdInst") + var err error + updateFlag := false + updateDbcsDetails := database.UpdateDbSystemDetails{} + // log.Info("Current annotations", "annotations", dbcs.GetAnnotations()) + oldSpec, err := dbcs.GetLastSuccessfulSpecWithLog(log) // Use the new method + if err != nil { + log.Error(err, "Failed to get last successful spec") + return err + } + + if oldSpec == nil { + log.Info("oldSpec is nil") + } else { + log.Info("Details of oldSpec", "oldSpec", oldSpec) + } + log.Info("Details of updateFlag -> " + fmt.Sprint(updateFlag)) + + if dbcs.Spec.DbSystem.CpuCoreCount > 0 && ((dbcs.Spec.DbSystem.CpuCoreCount != oldSpec.DbSystem.CpuCoreCount) || (dbcs.Spec.DbSystem.CpuCoreCount != *&dbcs.Status.CpuCoreCount)) { + log.Info("DB System cpu core count is: " + fmt.Sprint(dbcs.Spec.DbSystem.CpuCoreCount) + " DB System old cpu count is: " + fmt.Sprint(oldSpec.DbSystem.CpuCoreCount)) + updateDbcsDetails.CpuCoreCount = common.Int(dbcs.Spec.DbSystem.CpuCoreCount) + updateFlag = true + } + if dbcs.Spec.DbSystem.Shape != "" && 
((dbcs.Spec.DbSystem.Shape != oldSpec.DbSystem.Shape) || (dbcs.Spec.DbSystem.Shape != *dbcs.Status.Shape)) { + // log.Info("DB System desired shape is :" + string(dbcs.Spec.DbSystem.Shape) + "DB System old shape is " + string(oldSpec.DbSystem.Shape)) + updateDbcsDetails.Shape = common.String(dbcs.Spec.DbSystem.Shape) + updateFlag = true + } + + if dbcs.Spec.DbSystem.LicenseModel != "" && ((dbcs.Spec.DbSystem.LicenseModel != oldSpec.DbSystem.LicenseModel) || (dbcs.Spec.DbSystem.LicenseModel != *&dbcs.Status.LicenseModel)) { + licenceModel := getLicenceModel(dbcs) + // log.Info("DB System desired License Model is :" + string(dbcs.Spec.DbSystem.LicenseModel) + "DB Sytsem old License Model is " + string(oldSpec.DbSystem.LicenseModel)) + updateDbcsDetails.LicenseModel = database.UpdateDbSystemDetailsLicenseModelEnum(licenceModel) + updateFlag = true + } + + if dbcs.Spec.DbSystem.InitialDataStorageSizeInGB != 0 && dbcs.Spec.DbSystem.InitialDataStorageSizeInGB != oldSpec.DbSystem.InitialDataStorageSizeInGB { + // log.Info("DB System desired Storage Size is :" + fmt.Sprint(dbcs.Spec.DbSystem.InitialDataStorageSizeInGB) + "DB System old Storage Size is " + fmt.Sprint(oldSpec.DbSystem.InitialDataStorageSizeInGB)) + updateDbcsDetails.DataStorageSizeInGBs = &dbcs.Spec.DbSystem.InitialDataStorageSizeInGB + updateFlag = true + } + + // // Check and update KMS details if necessary + if (dbcs.Spec.KMSConfig != databasev4.KMSConfig{}) { + if dbcs.Spec.KMSConfig != oldSpec.DbSystem.KMSConfig { + log.Info("Updating KMS details in Existing Database") + + kmsKeyID := dbcs.Status.KMSDetailsStatus.KeyId + vaultID := dbcs.Status.KMSDetailsStatus.VaultId + tdeWalletPassword := "" + if dbcs.Spec.DbSystem.TdeWalletPasswordSecret != "" { + tdeWalletPassword, err = GetTdePassword(kubeClient, dbcs) + if err != nil { + log.Error(err, "Failed to get TDE wallet password") + } + } else { + log.Info("Its mandatory to define Tde wallet password when KMS Vault is defined. 
Not updating existing database") + return nil + } + dbAdminPassword := "" + if dbcs.Spec.DbSystem.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetAdminPassword(kubeClient, dbcs) + if err != nil { + log.Error(err, "Failed to get DB Admin password") + } + } + + // Assign all available fields to KMSConfig + dbcs.Spec.DbSystem.KMSConfig = databasev4.KMSConfig{ + VaultName: dbcs.Spec.KMSConfig.VaultName, + CompartmentId: dbcs.Spec.KMSConfig.CompartmentId, + KeyName: dbcs.Spec.KMSConfig.KeyName, + EncryptionAlgo: dbcs.Spec.KMSConfig.EncryptionAlgo, + VaultType: dbcs.Spec.KMSConfig.VaultType, + } + + // Create the migrate vault key request + migrateRequest := database.MigrateVaultKeyRequest{ + DatabaseId: common.String(databaseID), + MigrateVaultKeyDetails: database.MigrateVaultKeyDetails{ + KmsKeyId: common.String(kmsKeyID), + VaultId: common.String(vaultID), + }, + } + if tdeWalletPassword != "" { + migrateRequest.TdeWalletPassword = common.String(tdeWalletPassword) + } + if dbAdminPassword != "" { + migrateRequest.AdminPassword = common.String(dbAdminPassword) + } + // Change the phase to "Updating" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Update, nwClient, wrClient); statusErr != nil { + return statusErr + } + // Send the request + migrateResponse, err := dbClient.MigrateVaultKey(context.TODO(), migrateRequest) + if err != nil { + log.Error(err, "Failed to migrate vault key") + return err + } + + // // Check for additional response details (if any) + if migrateResponse.RawResponse.StatusCode != 200 { + log.Error(fmt.Errorf("unexpected status code"), "Migrate vault key request failed", "StatusCode", migrateResponse.RawResponse.StatusCode) + return fmt.Errorf("MigrateVaultKey request failed with status code %d", migrateResponse.RawResponse.StatusCode) + } + + log.Info("MigrateVaultKey request succeeded, waiting for database to reach the desired state") + + // // Wait for the database to reach the desired state after migration, 
timeout for 2 hours + // Define timeout and check interval + timeout := 2 * time.Hour + checkInterval := 1 * time.Minute + + err = WaitForDatabaseState(log, dbClient, databaseID, "AVAILABLE", timeout, checkInterval) + if err != nil { + log.Error(err, "Database did not reach the desired state within the timeout period") + return err + } + // Change the phase to "Available" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Available, nwClient, wrClient); statusErr != nil { + return statusErr + } + + log.Info("KMS migration process completed successfully") + } + } + + log.Info("Details of updateFlag after validations is " + fmt.Sprint(updateFlag)) + if updateFlag { + updateDbcsRequest := database.UpdateDbSystemRequest{ + DbSystemId: common.String(*dbcs.Spec.Id), + UpdateDbSystemDetails: updateDbcsDetails, + } + + if _, err := dbClient.UpdateDbSystem(context.TODO(), updateDbcsRequest); err != nil { + return err + } + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Update, nwClient, wrClient); statusErr != nil { + return statusErr + } + // Check the State + _, err = CheckResourceState(log, dbClient, *dbcs.Spec.Id, "UPDATING", "AVAILABLE") + if err != nil { + return err + } + } + + return nil +} + +func WaitForDatabaseState( + log logr.Logger, + dbClient database.DatabaseClient, + databaseId string, + desiredState database.DbHomeLifecycleStateEnum, + timeout time.Duration, + checkInterval time.Duration, +) error { + // Set a deadline for the timeout + deadline := time.Now().Add(timeout) + + log.Info("Starting to wait for the database to reach the desired state", "DatabaseID", databaseId, "DesiredState", desiredState, "Timeout", timeout) + + for time.Now().Before(deadline) { + // Prepare the request to fetch database details + getDatabaseReq := database.GetDatabaseRequest{ + DatabaseId: &databaseId, + } + + // Fetch database details + databaseResp, err := 
dbClient.GetDatabase(context.TODO(), getDatabaseReq) +		if err != nil { +			log.Error(err, "Failed to get database details", "DatabaseID", databaseId) +			return err +		} + +		// Log the current database state +		log.Info("Database State", "DatabaseID", databaseId, "CurrentState", databaseResp.LifecycleState) + +		// Check if the database has reached the desired state +		if databaseResp.LifecycleState == database.DatabaseLifecycleStateEnum(desiredState) { +			log.Info("Database reached the desired state", "DatabaseID", databaseId, "State", desiredState) +			return nil +		} + +		// Wait for the specified interval before checking again +		log.Info("Database not in the desired state yet, waiting...", "DatabaseID", databaseId, "CurrentState", databaseResp.LifecycleState, "DesiredState", desiredState, "NextCheckIn", checkInterval) +		time.Sleep(checkInterval) +	} + +	// Return an error if the timeout is reached +	err := fmt.Errorf("timed out waiting for database to reach the desired state: %s", desiredState) +	log.Error(err, "Timeout reached while waiting for the database to reach the desired state", "DatabaseID", databaseId) +	return err +} + +func UpdateDbcsSystemId(kubeClient client.Client, dbcs *databasev4.DbcsSystem) error { +	payload := []annotations.PatchValue{{ +		Op:    "replace", +		Path:  "/spec/details", +		Value: dbcs.Spec, +	}} +	payloadBytes, err := json.Marshal(payload) +	if err != nil { +		return err +	} + +	patch := client.RawPatch(types.JSONPatchType, payloadBytes) +	return kubeClient.Patch(context.TODO(), dbcs, patch) +} + +func CheckResourceState(logger logr.Logger, dbClient database.DatabaseClient, Id string, currentState string, expectedState string) (string, error) { +	// The database OCID is not available when the provisioning is ongoing. +	// Retry until the new DbcsSystem is ready. 
+ + var state string + var err error + for { + state, err = GetResourceState(logger, dbClient, Id) + if err != nil { + logger.Info("Error occurred while collecting the resource life cycle state") + return "", err + } + if string(state) == expectedState { + break + } else if string(state) == currentState { + logger.Info("DB System current state is still:" + string(state) + ". Sleeping for 60 seconds.") + time.Sleep(60 * time.Second) + continue + } else { + msg := "DB System current state " + string(state) + " is not matching " + expectedState + logger.Info(msg) + return "", errors.New(msg) + } + } + + return "", nil +} + +func GetResourceState(logger logr.Logger, dbClient database.DatabaseClient, Id string) (string, error) { + + dbcsId := Id + dbcsReq := database.GetDbSystemRequest{ + DbSystemId: &dbcsId, + } + + response, err := dbClient.GetDbSystem(context.TODO(), dbcsReq) + if err != nil { + return "", err + } + + state := string(response.LifecycleState) + + return state, nil +} + +func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { + + if dbcs.Spec.Id == nil { + dbcs.Status.State = "FAILED" + return nil + } + + if dbcs.Spec.Id == nil { + dbcs.Status.State = "FAILED" + return nil + } + + dbcsId := *dbcs.Spec.Id + + dbcsReq := database.GetDbSystemRequest{ + DbSystemId: &dbcsId, + } + + resp, err := dbClient.GetDbSystem(context.TODO(), dbcsReq) + if err != nil { + return err + } + + dbcs.Status.AvailabilityDomain = *resp.AvailabilityDomain + dbcs.Status.CpuCoreCount = *resp.CpuCoreCount + dbcs.Status.DataStoragePercentage = resp.DataStoragePercentage + dbcs.Status.DataStorageSizeInGBs = resp.DataStorageSizeInGBs + dbcs.Status.DbEdition = string(resp.DatabaseEdition) + dbcs.Status.DisplayName = *resp.DisplayName + dbcs.Status.LicenseModel = string(resp.LicenseModel) + dbcs.Status.RecoStorageSizeInGB = resp.RecoStorageSizeInGB + dbcs.Status.NodeCount = 
*resp.NodeCount +	dbcs.Status.StorageManagement = string(resp.DbSystemOptions.StorageManagement) +	dbcs.Status.Shape = resp.Shape +	dbcs.Status.Id = resp.Id +	dbcs.Status.SubnetId = *resp.SubnetId +	dbcs.Status.TimeZone = *resp.TimeZone +	dbcs.Status.LicenseModel = string(resp.LicenseModel) +	dbcs.Status.Network.ScanDnsName = resp.ScanDnsName +	dbcs.Status.Network.ListenerPort = resp.ListenerPort +	dbcs.Status.Network.HostName = *resp.Hostname +	dbcs.Status.Network.DomainName = *resp.Domain +	if dbcs.Spec.KMSConfig.CompartmentId != "" { +		dbcs.Status.KMSDetailsStatus.CompartmentId = dbcs.Spec.KMSConfig.CompartmentId +		dbcs.Status.KMSDetailsStatus.VaultName = dbcs.Spec.KMSConfig.VaultName +	} +	dbcs.Status.State = databasev4.LifecycleState(resp.LifecycleState) +	if dbcs.Spec.KMSConfig.CompartmentId != "" { +		dbcs.Status.KMSDetailsStatus.CompartmentId = dbcs.Spec.KMSConfig.CompartmentId +		dbcs.Status.KMSDetailsStatus.VaultName = dbcs.Spec.KMSConfig.VaultName +	} + +	sname, vcnId, err := getSubnetName(*resp.SubnetId, nwClient) + +	if err == nil { +		dbcs.Status.Network.SubnetName = sname +		vcnName, err := getVcnName(vcnId, nwClient) + +		if err == nil { +			dbcs.Status.Network.VcnName = vcnName +		} + +	} + +	// Work Request Status +	dbWorkRequest := databasev4.DbWorkrequests{} + +	dbWorks, err := getWorkRequest(*resp.OpcRequestId, wrClient, dbcs) +	if err == nil { +		for _, dbWork := range dbWorks { +			//status := checkValue(dbcs, dbWork.Id) +			// if status != 0 { +			dbWorkRequest.OperationId = dbWork.Id +			dbWorkRequest.OperationType = dbWork.OperationType +			dbWorkRequest.PercentComplete = fmt.Sprint(*dbWork.PercentComplete) //strconv.FormatFloat(dbWork.PercentComplete, 'E', -1, 32) +			if dbWork.TimeAccepted != nil { +				dbWorkRequest.TimeAccepted = dbWork.TimeAccepted.String() +			} +			if dbWork.TimeFinished != nil { +				dbWorkRequest.TimeFinished = dbWork.TimeFinished.String() +			} +			if dbWork.TimeStarted != nil { +				dbWorkRequest.TimeStarted = dbWork.TimeStarted.String() +			} + +			if 
dbWorkRequest != (databasev4.DbWorkrequests{}) { + status := checkValue(dbcs, dbWork.Id) + if status == 0 { + dbcs.Status.WorkRequests = append(dbcs.Status.WorkRequests, dbWorkRequest) + dbWorkRequest = databasev4.DbWorkrequests{} + } else { + setValue(dbcs, dbWorkRequest) + } + } + //} + } + } + + // DB Home Status + dbcs.Status.DbInfo = dbcs.Status.DbInfo[:0] + dbStatus := databasev4.DbStatus{} + + dbHomes, err := getDbHomeList(dbClient, dbcs) + + if err == nil { + for _, dbHome := range dbHomes { + dbDetails, err := getDList(dbClient, dbcs, dbHome.Id) + for _, dbDetail := range dbDetails { + if err == nil { + dbStatus.Id = dbDetail.Id + dbStatus.DbHomeId = *dbDetail.DbHomeId + dbStatus.DbName = *dbDetail.DbName + dbStatus.DbUniqueName = *dbDetail.DbUniqueName + dbStatus.DbWorkload = *dbDetail.DbWorkload + } + dbcs.Status.DbInfo = append(dbcs.Status.DbInfo, dbStatus) + dbStatus = databasev4.DbStatus{} + } + } + } + return nil +} + +func getDbHomeList(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) ([]database.DbHomeSummary, error) { + + var items []database.DbHomeSummary + dbcsId := *dbcs.Spec.Id + + dbcsReq := database.ListDbHomesRequest{ + DbSystemId: &dbcsId, + CompartmentId: &dbcs.Spec.DbSystem.CompartmentId, + } + + resp, err := dbClient.ListDbHomes(context.TODO(), dbcsReq) + if err != nil { + return items, err + } + + return resp.Items, nil +} + +func getDList(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, dbHomeId *string) ([]database.DatabaseSummary, error) { + + dbcsId := *dbcs.Spec.Id + var items []database.DatabaseSummary + dbcsReq := database.ListDatabasesRequest{ + SystemId: &dbcsId, + CompartmentId: &dbcs.Spec.DbSystem.CompartmentId, + DbHomeId: dbHomeId, + } + + resp, err := dbClient.ListDatabases(context.TODO(), dbcsReq) + if err != nil { + return items, err + } + + return resp.Items, nil +} + +func getSubnetName(subnetId string, nwClient core.VirtualNetworkClient) (*string, *string, error) { + + req := 
core.GetSubnetRequest{SubnetId: common.String(subnetId)} + + // Send the request using the service client + resp, err := nwClient.GetSubnet(context.Background(), req) + + if err != nil { + return nil, nil, err + } + // Retrieve value from the response. + + return resp.DisplayName, resp.VcnId, nil +} + +func getVcnName(vcnId *string, nwClient core.VirtualNetworkClient) (*string, error) { + + req := core.GetVcnRequest{VcnId: common.String(*vcnId)} + + // Send the request using the service client + resp, err := nwClient.GetVcn(context.Background(), req) + + if err != nil { + return nil, err + } + // Retrieve value from the response. + + return resp.DisplayName, nil +} + +// =========== validate Specs ============ +func ValidateSpex(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, eRecord record.EventRecorder) error { + + //var str1 string + var eventMsg string + var eventErr string = "Spec Error" + lastSuccSpec, err := dbcs.GetLastSuccessfulSpec() + if err != nil { + return err + } + // Check if last Successful update nil or not + if lastSuccSpec == nil { + if dbcs.Spec.DbSystem.DbVersion != "" { + _, err = GetDbLatestVersion(dbClient, dbcs, "") + if err != nil { + eventMsg = "DBCS CRD resource " + GetFmtStr(dbcs.Name) + " DbVersion " + GetFmtStr(dbcs.Spec.DbSystem.DbVersion) + " is not matching available DB releases." + eRecord.Eventf(dbcs, corev1.EventTypeWarning, eventErr, eventMsg) + return err + } + } else { + eventMsg = "DBCS CRD resource " + "DbVersion " + GetFmtStr(dbcs.Name) + GetFmtStr("dbcs.Spec.DbSystem.DbVersion") + " cannot be a empty string." 
+ eRecord.Eventf(dbcs, corev1.EventTypeWarning, eventErr, eventMsg) + return err + } + if dbcs.Spec.DbSystem.DbWorkload != "" { + _, err = getDbWorkLoadType(dbcs) + if err != nil { + eventMsg = "DBCS CRD resource " + GetFmtStr(dbcs.Name) + " DbWorkload " + GetFmtStr(dbcs.Spec.DbSystem.DbWorkload) + " is not matching the DBworkload type OLTP|DSS." + eRecord.Eventf(dbcs, corev1.EventTypeWarning, eventErr, eventMsg) + return err + } + } else { + eventMsg = "DBCS CRD resource " + "DbWorkload " + GetFmtStr(dbcs.Name) + GetFmtStr("dbcs.Spec.DbSystem.DbWorkload") + " cannot be a empty string." + eRecord.Eventf(dbcs, corev1.EventTypeWarning, eventErr, eventMsg) + return err + } + + if dbcs.Spec.DbSystem.NodeCount != nil { + switch *dbcs.Spec.DbSystem.NodeCount { + case 1: + case 2: + default: + eventMsg = "DBCS CRD resource " + "NodeCount " + GetFmtStr(dbcs.Name) + GetFmtStr("dbcs.Spec.DbSystem.NodeCount") + " can be either 1 or 2." + eRecord.Eventf(dbcs, corev1.EventTypeWarning, eventErr, eventMsg) + return err + } + } + + } else { + if lastSuccSpec.DbSystem.DbVersion != dbcs.Spec.DbSystem.DbVersion { + eventMsg = "DBCS CRD resource " + "DbVersion " + GetFmtStr(dbcs.Name) + GetFmtStr("dbcs.Spec.DbSystem.DbVersion") + " cannot be a empty string." + eRecord.Eventf(dbcs, corev1.EventTypeWarning, eventErr, eventMsg) + return err + } + + } + + return nil + +} diff --git a/commons/dbcssystem/dcommon.go b/commons/dbcssystem/dcommon.go new file mode 100644 index 00000000..beaa7c38 --- /dev/null +++ b/commons/dbcssystem/dcommon.go @@ -0,0 +1,449 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package common + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" + "sigs.k8s.io/controller-runtime/pkg/client" + + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" +) + +func GetDbHomeDetails(kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) (database.CreateDbHomeDetails, error) { + + dbHomeDetails := database.CreateDbHomeDetails{} + + dbHomeReq, err := GetDbLatestVersion(dbClient, dbcs, "") + if err != nil { + return database.CreateDbHomeDetails{}, err + } + dbHomeDetails.DbVersion = &dbHomeReq + + dbDetailsReq, err := GetDBDetails(kubeClient, dbcs) + if err != nil { + return database.CreateDbHomeDetails{}, err + } + + dbHomeDetails.Database = &dbDetailsReq + + return dbHomeDetails, nil +} + +func GetDbLatestVersion(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, dbSystemId string) (string, error) { + + //var provisionedDbcsSystemId string + ctx := context.TODO() + var version database.DbVersionSummary + var sFlag int = 0 + var val int + + dbVersionReq := database.ListDbVersionsRequest{} + if dbSystemId != "" { + dbVersionReq.DbSystemId = common.String(dbSystemId) + } + + dbVersionReq.IsDatabaseSoftwareImageSupported = common.Bool(true) + dbVersionReq.IsUpgradeSupported = common.Bool(false) + dbVersionReq.CompartmentId = common.String(dbcs.Spec.DbSystem.CompartmentId) + dbVersionReq.DbSystemShape = common.String(dbcs.Spec.DbSystem.Shape) + // Send the request using the service client + req := database.ListDbVersionsRequest(dbVersionReq) + + resp, err := dbClient.ListDbVersions(ctx, req) + + if err != nil { + return "", err + } + + if dbcs.Spec.DbSystem.DbVersion != "" { + for i := len(resp.Items) - 1; i >= 0; i-- { + version = resp.Items[i] + s1 := getStr(*version.Version, 2) + s2 := getStr(dbcs.Spec.DbSystem.DbVersion, 2) + if 
strings.EqualFold(s1, s2) { + val, _ = strconv.Atoi(s1) + if val >= 18 && val <= 21 { + s3 := s1 + "c" + if strings.EqualFold(s3, dbcs.Spec.DbSystem.DbVersion) { + sFlag = 1 + break + } + } else if val >= 23 { + s3 := s1 + "ai" + if strings.EqualFold(s3, dbcs.Spec.DbSystem.DbVersion) { + sFlag = 1 + break + } + } else if val < 18 && val >= 11 { + s4 := getStr(*version.Version, 4) + if strings.EqualFold(s4, dbcs.Spec.DbSystem.DbVersion) { + sFlag = 1 + break + } + } + + } + } + } + + if sFlag == 1 { + return *version.Version, nil + } + + return *version.Version, fmt.Errorf("no database version matched") +} + +func getStr(str1 string, num int) string { + return str1[0:num] +} + +func GetDBDetails(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (database.CreateDatabaseDetails, error) { + dbDetails := database.CreateDatabaseDetails{} + var val database.CreateDatabaseDetailsDbWorkloadEnum + + if dbcs.Spec.DbSystem.TdeWalletPasswordSecret != "" { + tdePasswd, err := GetTdePassword(kubeClient, dbcs) + if err != nil { + return database.CreateDatabaseDetails{}, err + } + tdePassword := strings.Trim(strings.TrimSuffix(tdePasswd, "\n"), "\"") + dbDetails.TdeWalletPassword = &tdePassword + //fmt.Print(tdePassword) + + } + + adminPasswd, err := GetAdminPassword(kubeClient, dbcs) + if err != nil { + return database.CreateDatabaseDetails{}, err + } + + adminPassword := strings.Trim(strings.TrimSuffix(adminPasswd, "\n"), "\"") + dbDetails.AdminPassword = &adminPassword + //fmt.Print(adminPassword) + if dbcs.Spec.DbSystem.DbName != "" { + dbDetails.DbName = common.String(dbcs.Spec.DbSystem.DbName) + } + + if dbcs.Spec.DbSystem.DbWorkload != "" { + val, err = getDbWorkLoadType(dbcs) + if err != nil { + return dbDetails, err + } else { + dbDetails.DbWorkload = database.CreateDatabaseDetailsDbWorkloadEnum(val) + } + } + dbDetails.DbName = common.String(dbcs.Spec.DbSystem.DbName) + if dbcs.Spec.DbSystem.PdbName != "" { + dbDetails.PdbName = &dbcs.Spec.DbSystem.PdbName + } + + 
//backup configuration + if dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled != nil { + if *dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled { + backupConfig, err := getBackupConfig(kubeClient, dbcs) + if err != nil { + return dbDetails, err + } else { + dbDetails.DbBackupConfig = &backupConfig + } + } + } + + return dbDetails, nil +} + +func getBackupConfig(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (database.DbBackupConfig, error) { + backupConfig := database.DbBackupConfig{} + + if dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled != nil { + if *dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled { + backupConfig.AutoBackupEnabled = dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled + val1, err := getBackupWindowEnum(dbcs) + if err != nil { + return backupConfig, err + } else { + backupConfig.AutoBackupWindow = database.DbBackupConfigAutoBackupWindowEnum(val1) + } + } + + if dbcs.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays != nil { + val1, err := getRecoveryWindowsInDays(dbcs) + if err != nil { + return backupConfig, err + } else { + backupConfig.RecoveryWindowInDays = common.Int(val1) + } + + } + } + + return backupConfig, nil +} + +func getBackupWindowEnum(dbcs *databasev4.DbcsSystem) (database.DbBackupConfigAutoBackupWindowEnum, error) { + + if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_ONE" { + return database.DbBackupConfigAutoBackupWindowOne, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_TWO" { + return database.DbBackupConfigAutoBackupWindowTwo, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_THREE" { + return database.DbBackupConfigAutoBackupWindowThree, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_FOUR" { + return database.DbBackupConfigAutoBackupWindowFour, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == 
"SLOT_FOUR" { + return database.DbBackupConfigAutoBackupWindowFour, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_FIVE" { + return database.DbBackupConfigAutoBackupWindowFive, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_SIX" { + return database.DbBackupConfigAutoBackupWindowSix, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_SEVEN" { + return database.DbBackupConfigAutoBackupWindowSeven, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_EIGHT" { + return database.DbBackupConfigAutoBackupWindowEight, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_NINE" { + return database.DbBackupConfigAutoBackupWindowNine, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_TEN" { + return database.DbBackupConfigAutoBackupWindowTen, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_ELEVEN" { + return database.DbBackupConfigAutoBackupWindowEleven, nil + } else if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_TWELVE" { + return database.DbBackupConfigAutoBackupWindowTwelve, nil + } else { + return database.DbBackupConfigAutoBackupWindowOne, nil + } + + //return database.DbBackupConfigAutoBackupWindowEight, fmt.Errorf("AutoBackupWindow values can be SLOT_ONE|SLOT_TWO|SLOT_THREE|SLOT_FOUR|SLOT_FIVE|SLOT_SIX|SLOT_SEVEN|SLOT_EIGHT|SLOT_NINE|SLOT_TEN|SLOT_ELEVEN|SLOT_TWELEVE. 
The current value set to " + *dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) +} + +func getRecoveryWindowsInDays(dbcs *databasev4.DbcsSystem) (int, error) { + + var days int + + switch *dbcs.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays { + case 7: + return *dbcs.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays, nil + case 15: + return *dbcs.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays, nil + case 30: + return *dbcs.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays, nil + case 45: + return *dbcs.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays, nil + case 60: + return *dbcs.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays, nil + default: + days = 30 + return days, nil + } + //return days, fmt.Errorf("RecoveryWindowsInDays values can be 7|15|30|45|60 Days.") +} + +func GetDBSystemopts( + dbcs *databasev4.DbcsSystem) database.DbSystemOptions { + + dbSystemOpt := database.DbSystemOptions{} + + if dbcs.Spec.DbSystem.StorageManagement != "" { + switch dbcs.Spec.DbSystem.StorageManagement { + case "LVM": + dbSystemOpt.StorageManagement = database.DbSystemOptionsStorageManagementLvm + case "ASM": + dbSystemOpt.StorageManagement = database.DbSystemOptionsStorageManagementAsm + default: + dbSystemOpt.StorageManagement = database.DbSystemOptionsStorageManagementAsm + } + } else { + dbSystemOpt.StorageManagement = database.DbSystemOptionsStorageManagementAsm + } + + return dbSystemOpt +} + +func getLicenceModel(dbcs *databasev4.DbcsSystem) database.DbSystemLicenseModelEnum { + if dbcs.Spec.DbSystem.LicenseModel == "BRING_YOUR_OWN_LICENSE" { + return database.DbSystemLicenseModelBringYourOwnLicense + + } + return database.DbSystemLicenseModelLicenseIncluded +} + +func getDbWorkLoadType(dbcs *databasev4.DbcsSystem) (database.CreateDatabaseDetailsDbWorkloadEnum, error) { + + if strings.ToUpper(dbcs.Spec.DbSystem.DbWorkload) == "OLTP" { + + return database.CreateDatabaseDetailsDbWorkloadOltp, nil + } + if strings.ToUpper(dbcs.Spec.DbSystem.DbWorkload) == 
"DSS" { + return database.CreateDatabaseDetailsDbWorkloadDss, nil + + } + + return database.CreateDatabaseDetailsDbWorkloadDss, fmt.Errorf("DbWorkload values can be OLTP|DSS. The current value set to " + dbcs.Spec.DbSystem.DbWorkload) +} + +func GetNodeCount( + dbcs *databasev4.DbcsSystem) int { + + if dbcs.Spec.DbSystem.NodeCount != nil { + return *dbcs.Spec.DbSystem.NodeCount + } else { + return 1 + } +} + +func GetInitialStorage( + dbcs *databasev4.DbcsSystem) int { + + if dbcs.Spec.DbSystem.InitialDataStorageSizeInGB > 0 { + return dbcs.Spec.DbSystem.InitialDataStorageSizeInGB + } + return 256 +} + +func GetDBEdition(dbcs *databasev4.DbcsSystem) database.LaunchDbSystemDetailsDatabaseEditionEnum { + + if dbcs.Spec.DbSystem.ClusterName != "" { + return database.LaunchDbSystemDetailsDatabaseEditionEnterpriseEditionExtremePerformance + } + + if dbcs.Spec.DbSystem.DbEdition != "" { + switch dbcs.Spec.DbSystem.DbEdition { + case "STANDARD_EDITION": + return database.LaunchDbSystemDetailsDatabaseEditionStandardEdition + case "ENTERPRISE_EDITION": + return database.LaunchDbSystemDetailsDatabaseEditionEnterpriseEdition + case "ENTERPRISE_EDITION_HIGH_PERFORMANCE": + return database.LaunchDbSystemDetailsDatabaseEditionEnterpriseEditionHighPerformance + case "ENTERPRISE_EDITION_EXTREME_PERFORMANCE": + return database.LaunchDbSystemDetailsDatabaseEditionEnterpriseEditionExtremePerformance + default: + return database.LaunchDbSystemDetailsDatabaseEditionEnterpriseEdition + } + } + + return database.LaunchDbSystemDetailsDatabaseEditionEnterpriseEdition +} + +func GetDBbDiskRedundancy( + dbcs *databasev4.DbcsSystem) database.LaunchDbSystemDetailsDiskRedundancyEnum { + + if dbcs.Spec.DbSystem.ClusterName != "" { + return database.LaunchDbSystemDetailsDiskRedundancyHigh + } + + switch dbcs.Spec.DbSystem.DiskRedundancy { + case "HIGH": + return database.LaunchDbSystemDetailsDiskRedundancyHigh + case "NORMAL": + return database.LaunchDbSystemDetailsDiskRedundancyNormal + } + + 
return database.LaunchDbSystemDetailsDiskRedundancyNormal +} + +func getWorkRequest(workId string, wrClient workrequests.WorkRequestClient, dbcs *databasev4.DbcsSystem) ([]workrequests.WorkRequestSummary, error) { + var workReq []workrequests.WorkRequestSummary + + req := workrequests.ListWorkRequestsRequest{CompartmentId: &dbcs.Spec.DbSystem.CompartmentId, OpcRequestId: &workId, ResourceId: dbcs.Spec.Id} + resp, err := wrClient.ListWorkRequests(context.Background(), req) + if err != nil { + return workReq, err + } + + return resp.Items, nil +} + +func GetKeyValue(str1 string) string { + list1 := strings.Split(str1, " ") + for _, value := range list1 { + val1 := strings.Split(value, "=") + if val1[0] == "version" { + return val1[1] + } + } + + return "noversion" +} + +func GetFmtStr(pstr string) string { + + return "[" + pstr + "]" +} + +func checkValue(dbcs *databasev4.DbcsSystem, workId *string) int { + + var status int = 0 + //dbWorkRequest := databasev4.DbWorkrequests{} + + if len(dbcs.Status.WorkRequests) > 0 { + for _, v := range dbcs.Status.WorkRequests { + if *v.OperationId == *workId { + status = 1 + } + } + } + + return status +} +func setValue(dbcs *databasev4.DbcsSystem, dbWorkRequest databasev4.DbWorkrequests) { + + //var status int = 1 + //dbWorkRequest := databasev4.DbWorkrequests{} + var counter int = 0 + if len(dbcs.Status.WorkRequests) > 0 { + for _, v := range dbcs.Status.WorkRequests { + if *v.OperationId == *dbWorkRequest.OperationId { + dbcs.Status.WorkRequests[counter].OperationId = dbWorkRequest.OperationId + dbcs.Status.WorkRequests[counter].OperationType = dbWorkRequest.OperationType + dbcs.Status.WorkRequests[counter].PercentComplete = dbWorkRequest.PercentComplete + dbcs.Status.WorkRequests[counter].TimeAccepted = dbWorkRequest.TimeAccepted + dbcs.Status.WorkRequests[counter].TimeFinished = dbWorkRequest.TimeFinished + dbcs.Status.WorkRequests[counter].TimeStarted = dbWorkRequest.TimeStarted + } + counter = counter + 1 + } + } + +} diff 
--git a/commons/finalizer/finalizer.go b/commons/finalizer/finalizer.go index bafe110e..169cbaef 100644 --- a/commons/finalizer/finalizer.go +++ b/commons/finalizer/finalizer.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** diff --git a/commons/autonomousdatabase/reconciler_util.go b/commons/k8s/create.go similarity index 53% rename from commons/autonomousdatabase/reconciler_util.go rename to commons/k8s/create.go index 2ceac5e6..cd836af7 100644 --- a/commons/autonomousdatabase/reconciler_util.go +++ b/commons/k8s/create.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -36,47 +36,23 @@ ** SOFTWARE. */ -package autonomousdatabase +package k8s import ( "context" - "fmt" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/go-logr/logr" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" - "github.com/oracle/oci-go-sdk/v45/secrets" - - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/commons/oci" ) -// SetStatus sets the status subresource. 
-func SetStatus(kubeClient client.Client, adb *dbv1alpha1.AutonomousDatabase) error { - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - curADB := &dbv1alpha1.AutonomousDatabase{} - - namespacedName := types.NamespacedName{ - Namespace: adb.GetNamespace(), - Name: adb.GetName(), - } - - if err := kubeClient.Get(context.TODO(), namespacedName, curADB); err != nil { - return err - } - - curADB.Status = adb.Status - return kubeClient.Status().Update(context.TODO(), curADB) - }) -} +func CreateSecret(kubeClient client.Client, namespace string, name string, data map[string][]byte, owner client.Object, label map[string]string) error { + ownerReference := NewOwnerReference(owner) -func createWalletSecret(kubeClient client.Client, namespacedName types.NamespacedName, data map[string][]byte) error { // Create the secret with the wallet data stringData := map[string]string{} for key, val := range data { @@ -85,8 +61,10 @@ func createWalletSecret(kubeClient client.Client, namespacedName types.Namespace walletSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Namespace: namespacedName.Namespace, - Name: namespacedName.Name, + Namespace: namespace, + Name: name, + OwnerReferences: ownerReference, + Labels: label, }, StringData: stringData, } @@ -97,31 +75,38 @@ func createWalletSecret(kubeClient client.Client, namespacedName types.Namespace return nil } -func CreateWalletSecret(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, secretClient secrets.SecretsClient, adb *dbv1alpha1.AutonomousDatabase) error { - // Kube Secret which contains Instance Wallet - walletName := adb.Spec.Details.Wallet.Name - if walletName == nil { - walletName = common.String(adb.GetName() + "-instance-wallet") - } +func CreateAutonomousBackup(kubeClient client.Client, + backupName string, + backupSummary database.AutonomousDatabaseBackupSummary, + ownerAdb *dbv4.AutonomousDatabase) error { - // No-op if Wallet is already downloaded - 
walletNamespacedName := types.NamespacedName{ - Namespace: adb.GetNamespace(), - Name: *walletName, - } - walletSecret := &corev1.Secret{} - if err := kubeClient.Get(context.TODO(), walletNamespacedName, walletSecret); err == nil { - return nil + backup := &dbv4.AutonomousDatabaseBackup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ownerAdb.GetNamespace(), + Name: backupName, + OwnerReferences: NewOwnerReference(ownerAdb), + Labels: map[string]string{ + "adb": ownerAdb.Name, + }, + }, + Spec: dbv4.AutonomousDatabaseBackupSpec{ + Target: dbv4.TargetSpec{ + K8sAdb: dbv4.K8sAdbSpec{ + Name: common.String(ownerAdb.Name), + }, + }, + DisplayName: backupSummary.DisplayName, + AutonomousDatabaseBackupOCID: backupSummary.Id, + OCIConfig: dbv4.OciConfigSpec{ + ConfigMapName: ownerAdb.Spec.OciConfig.ConfigMapName, + SecretName: ownerAdb.Spec.OciConfig.SecretName, + }, + }, } - data, err := oci.GetWallet(logger, kubeClient, dbClient, secretClient, adb) - if err != nil { + if err := kubeClient.Create(context.TODO(), backup); err != nil { return err } - if err := createWalletSecret(kubeClient, walletNamespacedName, data); err != nil { - return err - } - logger.Info(fmt.Sprintf("Wallet is stored in the Secret %s", *walletName)) return nil } diff --git a/commons/k8s/fetch.go b/commons/k8s/fetch.go new file mode 100644 index 00000000..617abdb5 --- /dev/null +++ b/commons/k8s/fetch.go @@ -0,0 +1,155 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package k8s + +import ( + "context" + "errors" + + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" +) + +func FetchResource(kubeClient client.Client, namespace string, name string, object client.Object) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + if err := kubeClient.Get(context.TODO(), namespacedName, object); err != nil { + return err + } + + return nil +} + +// Returns the first AutonomousDatabase resource that matches the AutonomousDatabaseOCID of the backup +// Sometimes the AutonomousDatabase doesn't exist. It could happen if a user simply want to restore or +// backup the AutonomousDatabase without creating an AutonomousDatabase rersource in the cluster. +// If there isn't an AutonomousDatabase with the same OCID, a nil is returned. +func FetchAutonomousDatabaseWithOCID(kubeClient client.Client, namespace string, ocid string) (*dbv4.AutonomousDatabase, error) { + adbList, err := fetchAutonomousDatabases(kubeClient, namespace) + if err != nil { + return nil, err + } + + for _, adb := range adbList.Items { + if adb.Spec.Details.Id != nil && *adb.Spec.Details.Id == ocid { + return &adb, nil + } + } + + return nil, nil +} + +func fetchAutonomousDatabases(kubeClient client.Client, namespace string) (*dbv4.AutonomousDatabaseList, error) { + // Get the list of AutonomousDatabaseBackupOCID in the same namespace + adbList := &dbv4.AutonomousDatabaseList{} + + if err := kubeClient.List(context.TODO(), adbList, &client.ListOptions{Namespace: namespace}); err != nil { + // Ignore not-found errors, since they can't be fixed by an immediate requeue. + // No need to change the since we don't know if we obtain the object. 
+ if !apiErrors.IsNotFound(err) { + return adbList, err + } + } + + return adbList, nil +} + +func FetchAutonomousDatabaseBackups(kubeClient client.Client, namespace string, adbName string) (*dbv4.AutonomousDatabaseBackupList, error) { + // Get the list of AutonomousDatabaseBackupOCID in the same namespace + backupList := &dbv4.AutonomousDatabaseBackupList{} + + // Create a label selector + selector := labels.Set{"adb": adbName}.AsSelector() + + if err := kubeClient.List( + context.TODO(), + backupList, + &client.ListOptions{ + Namespace: namespace, + LabelSelector: selector, + }); err != nil { + // Ignore not-found errors, since they can't be fixed by an immediate requeue. + // No need to change the since we don't know if we obtain the object. + if !apiErrors.IsNotFound(err) { + return backupList, err + } + } + + return backupList, nil +} + +func FetchConfigMap(kubeClient client.Client, namespace string, name string) (*corev1.ConfigMap, error) { + configMap := &corev1.ConfigMap{} + + if err := FetchResource(kubeClient, namespace, name, configMap); err != nil { + return nil, err + } + + return configMap, nil +} + +func FetchSecret(kubeClient client.Client, namespace string, name string) (*corev1.Secret, error) { + secret := &corev1.Secret{} + + if err := FetchResource(kubeClient, namespace, name, secret); err != nil { + return nil, err + } + + return secret, nil +} + +func GetSecretValue(kubeClient client.Client, namespace string, name string, key string) (string, error) { + secret, err := FetchSecret(kubeClient, namespace, name) + if err != nil { + return "", err + } + + val, ok := secret.Data[key] + if !ok { + return "", errors.New("Secret key not found: " + key) + } + return string(val), nil +} diff --git a/commons/k8s/finalizer.go b/commons/k8s/finalizer.go new file mode 100644 index 00000000..015bbc61 --- /dev/null +++ b/commons/k8s/finalizer.go @@ -0,0 +1,92 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package k8s + +import ( + "context" + "encoding/json" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func patchFinalizer(kubeClient client.Client, obj client.Object) error { + finalizer := obj.GetFinalizers() + + payload := []patchValue{} + + if obj.GetFinalizers() == nil { + payload = append(payload, patchValue{ + Op: "replace", + Path: "/metadata/finalizers", + Value: []string{}, + }) + } + + payload = append(payload, patchValue{ + Op: "replace", + Path: "/metadata/finalizers", + Value: finalizer, + }) + + payloadBytes, err := json.Marshal(payload) + if err != nil { + return err + } + + patch := client.RawPatch(types.JSONPatchType, payloadBytes) + return kubeClient.Patch(context.TODO(), obj, patch) +} + +func AddFinalizerAndPatch(kubeClient client.Client, obj client.Object, finalizer string) error { + controllerutil.AddFinalizer(obj, finalizer) + if err := patchFinalizer(kubeClient, obj); err != nil { + return err + } + return nil +} + +func RemoveFinalizerAndPatch(kubeClient client.Client, obj client.Object, finalizer string) error { + controllerutil.RemoveFinalizer(obj, finalizer) + if err := patchFinalizer(kubeClient, obj); err != nil { + return err + } + return nil +} diff --git a/commons/k8s/utils.go b/commons/k8s/utils.go new file mode 100644 index 00000000..37ec1a3f --- /dev/null +++ b/commons/k8s/utils.go @@ -0,0 +1,86 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package k8s + +import ( + "context" + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilErrors "k8s.io/apimachinery/pkg/util/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func NewOwnerReference(owner client.Object) []metav1.OwnerReference { + ownerRef := []metav1.OwnerReference{ + { + Kind: owner.GetObjectKind().GroupVersionKind().Kind, + APIVersion: owner.GetObjectKind().GroupVersionKind().GroupVersion().String(), + Name: owner.GetName(), + UID: owner.GetUID(), + }, + } + return ownerRef +} + +func CombineErrors(errs ...error) error { + return utilErrors.NewAggregate(errs) +} + +/********************** + Patch resource +**********************/ + +type patchValue struct { + Op string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` +} + +func Patch(kubeClient client.Client, obj client.Object, path string, value interface{}) error { + payload := []patchValue{{ + Op: "replace", + Path: path, + Value: value, + }} + payloadBytes, _ := json.Marshal(payload) + patch := client.RawPatch(types.JSONPatchType, payloadBytes) + return kubeClient.Patch(context.TODO(), obj, patch) +} diff --git a/commons/multitenant/lrest/common.go b/commons/multitenant/lrest/common.go new file mode 100644 index 00000000..e72e85b0 --- /dev/null +++ b/commons/multitenant/lrest/common.go @@ -0,0 +1,113 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if + one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+*/ + +package lrest + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "regexp" + "strings" + + corev1 "k8s.io/api/core/v1" + + ctrl "sigs.k8s.io/controller-runtime" +) + +func CommonDecryptWithPrivKey(Key string, Buffer string, req ctrl.Request) (string, error) { + + Debug := 0 + block, _ := pem.Decode([]byte(Key)) + pkcs8PrivateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + fmt.Printf("Failed to parse private key %s \n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", Key) + fmt.Printf("======================================\n") + } + + encString64, err := base64.StdEncoding.DecodeString(string(Buffer)) + if err != nil { + fmt.Printf("Failed to decode encrypted string to base64: %s\n", err.Error()) + return "", err + } + + decryptedB, err := rsa.DecryptPKCS1v15(nil, pkcs8PrivateKey.(*rsa.PrivateKey), encString64) + if err != nil { + fmt.Printf("Failed to decrypt string %s\n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("[%s]\n", string(decryptedB)) + } + return strings.TrimSpace(string(decryptedB)), err + +} + +func ParseConfigMapData(cfgmap *corev1.ConfigMap) []string { + + var tokens []string + for Key, Value := range cfgmap.Data { + fmt.Printf("KEY:%s\n", Key) + re0 := regexp.MustCompile("\\n") + re1 := regexp.MustCompile(";") + re2 := regexp.MustCompile(",") /* Additional separator for future use */ + + Value = re0.ReplaceAllString(Value, " ") + tokens = strings.Split(Value, " ") + + for cnt := range tokens { + if len(tokens[cnt]) != 0 { + tokens[cnt] = re1.ReplaceAllString(tokens[cnt], " ") + tokens[cnt] = re2.ReplaceAllString(tokens[cnt], " ") + + } + + } + + } + + return tokens + +} diff --git a/commons/observability/constants.go b/commons/observability/constants.go new file mode 100644 index 00000000..45f06e49 --- /dev/null +++ b/commons/observability/constants.go @@ -0,0 +1,175 @@ +package 
observability + +import ( + v4 "github.com/oracle/oracle-database-operator/apis/observability/v4" +) + +const ( + UnknownValue = "UNKNOWN" + DefaultValue = "DEFAULT" +) + +// Observability Status +const ( + StatusObservabilityPending v4.StatusEnum = "PENDING" + StatusObservabilityError v4.StatusEnum = "ERROR" + StatusObservabilityReady v4.StatusEnum = "READY" +) + +// Log Names +const ( + LogReconcile = "ObservabilityExporterLogger" + LogExportersDeploy = "ObservabilityExporterDeploymentLogger" + LogExportersSVC = "ObservabilityExporterServiceLogger" + LogExportersServiceMonitor = "ObservabilityExporterServiceMonitorLogger" +) + +// Defaults +const ( + DefaultDbUserKey = "username" + DefaultDBPasswordKey = "password" + DefaultDBConnectionStringKey = "connection" + DefaultConfigVolumeString = "config-volume" + DefaultLogFilename = "alert.log" + DefaultLogVolumeString = "log-volume" + DefaultWalletVolumeString = "creds" + DefaultOCIPrivateKeyVolumeString = "ocikey" + DefaultOCIConfigFingerprintKey = "fingerprint" + DefaultOCIConfigRegionKey = "region" + DefaultOCIConfigTenancyKey = "tenancy" + DefaultOCIConfigUserKey = "user" + + DefaultExporterImage = "container-registry.oracle.com/database/observability-exporter:1.5.2" + DefaultServicePort = 9161 + DefaultServiceTargetPort = 9161 + DefaultAppPort = 8080 + DefaultPrometheusPort = "metrics" + DefaultServiceType = "ClusterIP" + DefaultReplicaCount = 1 + DefaultExporterConfigMountRootPath = "/oracle/observability" + DefaultOracleHome = "/lib/oracle/21/client64/lib" + DefaultOracleTNSAdmin = DefaultOracleHome + "/network/admin" + DefaultExporterConfigmapFilename = "config.toml" + DefaultVaultPrivateKeyRootPath = "/oracle/config" + DefaultPrivateKeyFileKey = "privatekey" + DefaultPrivateKeyFileName = "private.pem" + DefaultVaultPrivateKeyAbsolutePath = DefaultVaultPrivateKeyRootPath + "/" + DefaultPrivateKeyFileName + DefaultExporterConfigmapAbsolutePath = DefaultExporterConfigMountRootPath + "/" + 
DefaultExporterConfigmapFilename +) + +// labeling +const ( + DefaultSelectorLabelKey = "app" + DefaultReleaseLabelKey = "release" +) + +// default resource +const ( + DefaultExporterContainerName = "observability-exporter" +) + +// Known environment variables +const ( + EnvVarOracleHome = "ORACLE_HOME" + EnvVarDataSourceUser = "DB_USERNAME" + EnvVarDataSourcePassword = "DB_PASSWORD" + EnvVarDataSourceConnectString = "DB_CONNECT_STRING" + EnvVarDataSourceLogDestination = "LOG_DESTINATION" + EnvVarDataSourcePwdVaultSecretName = "VAULT_SECRET_NAME" + EnvVarDataSourcePwdVaultId = "VAULT_ID" + EnvVarCustomConfigmap = "CUSTOM_METRICS" + EnvVarTNSAdmin = "TNS_ADMIN" + EnvVarVaultTenancyOCID = "vault_tenancy_ocid" + EnvVarVaultUserOCID = "vault_user_ocid" + EnvVarVaultFingerprint = "vault_fingerprint" + EnvVarVaultPrivateKeyPath = "vault_private_key_path" + EnvVarVaultRegion = "vault_region" +) + +// Positive ConditionTypes +const ( + IsCRAvailable = "ExporterReady" + IsExporterDeploymentReady = "DeploymentReady" + IsExporterServiceReady = "ServiceReady" + IsExporterServiceMonitorReady = "ServiceMonitorReady" +) + +// Reason +const ( + ReasonInitStart = "InitializationStarted" + ReasonReadyValidated = "ReadinessValidated" + ReasonValidationInProgress = "ReadinessValidationInProgress" + ReasonReadyFailed = "ReadinessValidationFailed" + ReasonDeploymentSpecValidationFailed = "SpecValidationFailed" + + ReasonDeploymentSuccessful = "ResourceDeployed" + ReasonResourceUpdated = "ResourceUpdated" + ReasonResourceUpdateFailed = "ResourceUpdateFailed" + ReasonDeploymentFailed = "ResourceDeploymentFailed" + ReasonDeploymentPending = "ResourceDeploymentInProgress" + + ReasonGeneralResourceGenerationFailed = "ResourceGenerationFailed" + ReasonGeneralResourceCreated = "ResourceCreated" + ReasonGeneralResourceCreationFailed = "ResourceCreationFailed" + ReasonGeneralResourceValidationCompleted = "ResourceDeployed" + ReasonGeneralResourceValidationFailureDueToError = 
"ResourceCouldNotBeValidated" +) + +// Log Errors +const ( + ErrorCRRetrieve = "an error occurred with retrieving the cr" + ErrorStatusUpdate = "an error occurred with updating the cr status" + ErrorSpecValidationFailedDueToAnError = "an error occurred with validating the exporter deployment spec" + ErrorDeploymentPodsFailure = "an error occurred with deploying exporter deployment pods" + ErrorResourceCreationFailure = "an error occurred with creating databaseobserver resource" + ErrorResourceRetrievalFailureDueToAnError = "an error occurred with retrieving databaseobserver resource" + LogErrorWithResourceUpdate = "an error occurred with updating resource" +) + +// Log Infos +const ( + LogCRStart = "Started DatabaseObserver instance reconciliation" + LogCREnd = "Ended DatabaseObserver instance reconciliation, resource must have been deleted." + LogResourceCreated = "Created DatabaseObserver resource successfully" + LogResourceUpdated = "Updated DatabaseObserver resource successfully" + LogResourceFound = "Validated DatabaseObserver resource readiness" + LogSuccessWithResourceUpdate = "Updated DatabaseObserver resource successfully" +) + +// Messages +const ( + MessageCRInitializationStarted = "Started initialization of custom resource" + MessageCRValidated = "Completed validation of custom resource readiness successfully" + MessageCRValidationFailed = "Failed to validate readiness of custom resource due to an error" + MessageCRValidationWaiting = "Waiting for other resources to be ready to fully validate readiness" + + MessageResourceCreated = "Completed creation of resource successfully" + MessageResourceCreationFailed = "Failed to create resource due to an error" + MessageResourceReadinessValidated = "Completed validation of resource readiness" + MessageResourceReadinessValidationFailed = "Failed to validate resource due to an error retrieving resource" + MessageResourceGenerationFailed = "Failed to generate resource due to an error" + + 
MessageExporterDeploymentSpecValidationFailed = "Failed to validate export deployment spec due to an error with the spec" + MessageExporterResourceUpdateFailed = "Failed to update exporter resource due to an error" + MessageExporterResourceUpdated = "Updated exporter resource successfully" + MessageExporterDeploymentValidationFailed = "Failed to validate exporter deployment due to an error retrieving resource" + MessageExporterDeploymentSuccessful = "Completed validation of exporter deployment readiness" + MessageExporterDeploymentFailed = "Failed to deploy exporter deployment due to PodFailure" + MessageExporterDeploymentListingFailed = "Failed to list exporter deployment pods" + MessageExporterDeploymentPending = "Waiting for exporter deployment pods to be ready" +) + +// Event Recorder Outputs +const ( + EventReasonFailedCRRetrieval = "ExporterRetrievalFailed" + EventMessageFailedCRRetrieval = "Encountered error retrieving databaseObserver instance" + + EventReasonSpecError = "DeploymentSpecValidationFailed" + EventMessageSpecErrorDBPasswordSecretMissing = "Spec validation failed due to required dbPassword secret not found" + EventMessageSpecErrorDBConnectionStringSecretMissing = "Spec validation failed due to required dbConnectionString secret not found" + EventMessageSpecErrorDBPUserSecretMissing = "Spec validation failed due to dbUser secret not found" + EventMessageSpecErrorConfigmapMissing = "Spec validation failed due to custom config configmap not found" + EventMessageSpecErrorDBWalletSecretMissing = "Spec validation failed due to provided dbWallet secret not found" + + EventReasonUpdateSucceeded = "ExporterDeploymentUpdated" +) diff --git a/commons/observability/utils.go b/commons/observability/utils.go new file mode 100644 index 00000000..6eccb261 --- /dev/null +++ b/commons/observability/utils.go @@ -0,0 +1,493 @@ +package observability + +import ( + api "github.com/oracle/oracle-database-operator/apis/observability/v4" + monitorv1 
"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "path/filepath" + "strings" +) + +func AddSidecarContainers(a *api.DatabaseObserver, listing *[]corev1.Container) { + + if containers := a.Spec.ExporterSidecars; len(containers) > 0 { + for _, container := range containers { + *listing = append(*listing, container) + } + + } +} + +func AddSidecarVolumes(a *api.DatabaseObserver, listing *[]corev1.Volume) { + + if volumes := a.Spec.SideCarVolumes; len(volumes) > 0 { + for _, v := range volumes { + *listing = append(*listing, v) + } + + } +} + +// GetLabels retrieves labels from the spec +func GetLabels(a *api.DatabaseObserver, customResourceLabels map[string]string) map[string]string { + + var l = make(map[string]string) + + // get inherited labels + if iLabels := a.Spec.InheritLabels; iLabels != nil { + for _, v := range iLabels { + if v != DefaultSelectorLabelKey { + l[v] = a.Labels[v] + } + } + } + + if customResourceLabels != nil { + for k, v := range customResourceLabels { + if k != DefaultSelectorLabelKey { + l[k] = v + } + } + } + + // add app label + l[DefaultSelectorLabelKey] = a.Name + return l +} + +// GetSelectorLabel adds selector label +func GetSelectorLabel(a *api.DatabaseObserver) map[string]string { + selectors := make(map[string]string) + selectors[DefaultSelectorLabelKey] = a.Name + return selectors +} + +// GetExporterVersion retrieves version of exporter used +func GetExporterVersion(a *api.DatabaseObserver) string { + appVersion := "latest" + whichImage := DefaultExporterImage + if img := a.Spec.Exporter.Deployment.ExporterImage; img != "" { + whichImage = img + } + + // return tag in image:tag + if str := strings.Split(whichImage, ":"); len(str) == 2 { + appVersion = str[1] + } + return appVersion +} + +// GetExporterArgs retrieves args +func GetExporterArgs(a *api.DatabaseObserver) []string { + if args := a.Spec.Exporter.Deployment.ExporterArgs; 
args != nil || len(args) > 0 { + return args + } + return nil +} + +// GetExporterDeploymentSecurityContext retrieves security context for container +func GetExporterDeploymentSecurityContext(a *api.DatabaseObserver) *corev1.SecurityContext { + if sc := a.Spec.Exporter.Deployment.SecurityContext; sc != nil { + return sc + } + return &corev1.SecurityContext{} +} + +// GetExporterPodSecurityContext retrieves security context for pods +func GetExporterPodSecurityContext(a *api.DatabaseObserver) *corev1.PodSecurityContext { + if sc := a.Spec.Exporter.Deployment.DeploymentPodTemplate.SecurityContext; sc != nil { + return sc + } + return &corev1.PodSecurityContext{} +} + +// GetExporterCommands retrieves commands +func GetExporterCommands(a *api.DatabaseObserver) []string { + if c := a.Spec.Exporter.Deployment.ExporterCommands; c != nil || len(c) > 0 { + return c + } + return nil +} + +// GetExporterServicePort function retrieves exporter service port from a or provides default +func GetExporterServicePort(a *api.DatabaseObserver) []corev1.ServicePort { + + servicePorts := make([]corev1.ServicePort, 0) + + // get service ports + if ports := a.Spec.Exporter.Service.Ports; len(ports) > 0 { + for _, port := range ports { + servicePorts = append(servicePorts, port) + } + + } else { + // if not, provide default service port + servicePorts = append(servicePorts, corev1.ServicePort{ + Name: DefaultPrometheusPort, + Port: DefaultServicePort, + TargetPort: intstr.FromInt32(DefaultServiceTargetPort), + }) + } + + return servicePorts + +} + +// GetEndpoints function +func GetEndpoints(a *api.DatabaseObserver) []monitorv1.Endpoint { + + endpoints := make([]monitorv1.Endpoint, 0) + + // get endpoints + if es := a.Spec.Prometheus.ServiceMonitor.Endpoints; len(es) > 0 { + for _, e := range es { + endpoints = append(endpoints, e) + } + } + + // if not, provide default endpoint + endpoints = append(endpoints, monitorv1.Endpoint{ + Port: DefaultPrometheusPort, + Interval: "20s", + }) + + 
return endpoints +} + +func AddNamespaceSelector(a *api.DatabaseObserver, spec *monitorv1.ServiceMonitorSpec) { + + if ns := a.Spec.Prometheus.ServiceMonitor.NamespaceSelector; ns != nil { + a.Spec.Prometheus.ServiceMonitor.NamespaceSelector.DeepCopyInto(&spec.NamespaceSelector) + } + +} + +// GetExporterDeploymentVolumeMounts function retrieves volume mounts from a or provides default +func GetExporterDeploymentVolumeMounts(a *api.DatabaseObserver) []corev1.VolumeMount { + + volM := make([]corev1.VolumeMount, 0) + + if cVolumeSourceName := a.Spec.ExporterConfig.Configmap.Name; cVolumeSourceName != "" { + volM = append(volM, corev1.VolumeMount{ + Name: DefaultConfigVolumeString, + MountPath: DefaultExporterConfigMountRootPath, + }) + } + + // a.Spec.Database.DBWallet.SecretName optional + // if null, consider the database NON-ADB and connect as such + if secretName := a.Spec.Database.DBWallet.SecretName; secretName != "" { + + p := DefaultOracleTNSAdmin + + // Determine what the value of TNS_ADMIN + // if custom TNS_ADMIN environment variable is set and found, use that instead as the path + if rCustomEnvs := a.Spec.Exporter.Deployment.ExporterEnvs; rCustomEnvs != nil { + if v, f := rCustomEnvs[EnvVarTNSAdmin]; f { + p = v + } + } + + volM = append(volM, corev1.VolumeMount{ + Name: DefaultWalletVolumeString, + MountPath: p, + }) + } + + // a.Spec.OCIConfig.SecretName required if vault is used + if secretName := a.Spec.OCIConfig.SecretName; secretName != "" { + volM = append(volM, corev1.VolumeMount{ + Name: DefaultOCIPrivateKeyVolumeString, + MountPath: DefaultVaultPrivateKeyRootPath, + }) + } + + // a.Spec.Log.Path path to mount for a custom log path, a volume is required + if rLogPath := a.Spec.Log.Path; rLogPath != "" { + vName := GetLogName(a) + volM = append(volM, corev1.VolumeMount{ + Name: vName, + MountPath: rLogPath, + }) + } + + return volM +} + +// GetExporterDeploymentVolumes function retrieves volumes from a or provides default +func 
GetExporterDeploymentVolumes(a *api.DatabaseObserver) []corev1.Volume { + + vol := make([]corev1.Volume, 0) + + // config-volume Volume + // if null, the exporter uses the default built-in config + if cVolumeSourceName := a.Spec.ExporterConfig.Configmap.Name; cVolumeSourceName != "" { + + cVolumeSourceKey := a.Spec.ExporterConfig.Configmap.Key + cMSource := &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cVolumeSourceName, + }, + Items: []corev1.KeyToPath{{ + Key: cVolumeSourceKey, + Path: DefaultExporterConfigmapFilename, + }}, + } + + vol = append(vol, corev1.Volume{Name: DefaultConfigVolumeString, VolumeSource: corev1.VolumeSource{ConfigMap: cMSource}}) + } + + // creds Volume + // a.Spec.Database.DBWallet.SecretName optional + // if null, consider the database NON-ADB and connect as such + if secretName := a.Spec.Database.DBWallet.SecretName; secretName != "" { + + vol = append(vol, corev1.Volume{ + Name: DefaultWalletVolumeString, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }) + } + + // ocikey Volume + // a.Spec.Database.DBWallet.SecretName optional + if secretName := a.Spec.OCIConfig.SecretName; secretName != "" { + + OCIConfigSource := &corev1.SecretVolumeSource{ + SecretName: secretName, + Items: []corev1.KeyToPath{{ + Key: DefaultPrivateKeyFileKey, + Path: DefaultPrivateKeyFileName, + }}, + } + + vol = append(vol, corev1.Volume{ + Name: DefaultOCIPrivateKeyVolumeString, + VolumeSource: corev1.VolumeSource{Secret: OCIConfigSource}, + }) + } + + // log-volume Volume + if rLogPath := a.Spec.Log.Path; rLogPath != "" { + vs := GetLogVolumeSource(a) + vName := GetLogName(a) + + vol = append(vol, corev1.Volume{ + Name: vName, + VolumeSource: vs, + }) + } + + return vol +} + +// GetExporterConfig function retrieves config name for status +func GetExporterConfig(a *api.DatabaseObserver) string { + + configName := DefaultValue + if cmName := 
a.Spec.ExporterConfig.Configmap.Name; cmName != "" { + configName = cmName + } + + return configName +} + +func GetLogName(a *api.DatabaseObserver) string { + if name := a.Spec.Log.Volume.Name; name != "" { + return name + } + return DefaultLogVolumeString +} + +// GetLogVolumeSource function retrieves the source to help GetExporterDeploymentVolumes +func GetLogVolumeSource(a *api.DatabaseObserver) corev1.VolumeSource { + + vs := corev1.VolumeSource{} + rLogVolumeClaimName := a.Spec.Log.Volume.PersistentVolumeClaim.ClaimName + + // volume claims take precedence + if rLogVolumeClaimName != "" { + vs.PersistentVolumeClaim = &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: rLogVolumeClaimName, + } + return vs + + } else { + vs.EmptyDir = &corev1.EmptyDirVolumeSource{} + return vs + } +} + +// AddEnv is a helper method that appends an Env Var value +func AddEnv(env []corev1.EnvVar, existing map[string]string, name string, v string) []corev1.EnvVar { + + // Evaluate if env already exists + if _, f := existing[name]; !f { + env = append(env, corev1.EnvVar{Name: name, Value: v}) + } + return env +} + +// AddEnvFrom is a helper method that appends an Env Var value source +func AddEnvFrom(env []corev1.EnvVar, existing map[string]string, name string, v *corev1.EnvVarSource) []corev1.EnvVar { + + // Evaluate if env already exists + if _, f := existing[name]; !f { + env = append(env, corev1.EnvVar{Name: name, ValueFrom: v}) + } + return env +} + +// GetExporterEnvs function retrieves env from a or provides default +func GetExporterEnvs(a *api.DatabaseObserver) []corev1.EnvVar { + + optional := true + rDBPasswordKey := a.Spec.Database.DBPassword.Key + rDBPasswordName := a.Spec.Database.DBPassword.SecretName + rDBConnectStrKey := a.Spec.Database.DBConnectionString.Key + rDBConnectStrName := a.Spec.Database.DBConnectionString.SecretName + rDBVaultSecretName := a.Spec.Database.DBPassword.VaultSecretName + rDBVaultOCID := a.Spec.Database.DBPassword.VaultOCID + rDBUserSKey := 
a.Spec.Database.DBUser.Key + rDBUserSName := a.Spec.Database.DBUser.SecretName + rOCIConfigCMName := a.Spec.OCIConfig.ConfigMapName + rLogPath := a.Spec.Log.Path + rLogFilename := a.Spec.Log.Filename + rCustomEnvs := a.Spec.Exporter.Deployment.ExporterEnvs + + var env = make([]corev1.EnvVar, 0) + + // add CustomEnvs + if rCustomEnvs != nil { + for k, v := range rCustomEnvs { + env = append(env, corev1.EnvVar{Name: k, Value: v}) + } + } + + // DB_USERNAME environment variable + if rDBUserSKey == "" { // overwrite + rDBUserSKey = DefaultDbUserKey + } + envUser := &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: rDBUserSKey, + LocalObjectReference: corev1.LocalObjectReference{Name: rDBUserSName}, + Optional: &optional, + }} + env = AddEnvFrom(env, rCustomEnvs, EnvVarDataSourceUser, envUser) + + // DB_CONNECT_STRING environment variable + if rDBConnectStrKey == "" { + rDBConnectStrKey = DefaultDBConnectionStringKey + } + envConnectStr := &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: rDBConnectStrKey, + LocalObjectReference: corev1.LocalObjectReference{Name: rDBConnectStrName}, + Optional: &optional, + }} + env = AddEnvFrom(env, rCustomEnvs, EnvVarDataSourceConnectString, envConnectStr) + + // DB_PASSWORD environment variable + // if useVault, add environment variables for Vault ID and Vault Secret Name + useVault := rDBVaultSecretName != "" && rDBVaultOCID != "" + if useVault { + + env = AddEnv(env, rCustomEnvs, EnvVarDataSourcePwdVaultSecretName, rDBVaultSecretName) + env = AddEnv(env, rCustomEnvs, EnvVarDataSourcePwdVaultId, rDBVaultOCID) + + // Configuring the configProvider prefixed with vault_ + // https://github.com/oracle/oracle-db-appdev-monitoring/blob/main/vault/vault.go + configSourceFingerprintValue := &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + Key: DefaultOCIConfigFingerprintKey, + LocalObjectReference: corev1.LocalObjectReference{Name: rOCIConfigCMName}, + Optional: &optional, 
+ }, + } + configSourceRegionValue := &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + Key: DefaultOCIConfigRegionKey, + LocalObjectReference: corev1.LocalObjectReference{Name: rOCIConfigCMName}, + Optional: &optional, + }, + } + configSourceTenancyValue := &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + Key: DefaultOCIConfigTenancyKey, + LocalObjectReference: corev1.LocalObjectReference{Name: rOCIConfigCMName}, + Optional: &optional, + }, + } + configSourceUserValue := &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + Key: DefaultOCIConfigUserKey, + LocalObjectReference: corev1.LocalObjectReference{Name: rOCIConfigCMName}, + Optional: &optional, + }, + } + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultFingerprint, configSourceFingerprintValue) + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultUserOCID, configSourceUserValue) + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultTenancyOCID, configSourceTenancyValue) + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultRegion, configSourceRegionValue) + env = AddEnv(env, rCustomEnvs, EnvVarVaultPrivateKeyPath, DefaultVaultPrivateKeyAbsolutePath) + + } else { + + if rDBPasswordKey == "" { // overwrite + rDBPasswordKey = DefaultDBPasswordKey + } + dbPassword := &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: rDBPasswordKey, + LocalObjectReference: corev1.LocalObjectReference{Name: rDBPasswordName}, + Optional: &optional, + }} + + env = AddEnvFrom(env, rCustomEnvs, EnvVarDataSourcePassword, dbPassword) + + } + + // CUSTOM_METRICS environment variable + if customMetricsName := a.Spec.ExporterConfig.Configmap.Name; customMetricsName != "" { + customMetrics := DefaultExporterConfigmapAbsolutePath + + env = AddEnv(env, rCustomEnvs, EnvVarCustomConfigmap, customMetrics) + } + + env = AddEnv(env, rCustomEnvs, EnvVarOracleHome, DefaultOracleHome) + env = AddEnv(env, rCustomEnvs, EnvVarTNSAdmin, DefaultOracleTNSAdmin) + + // LOG_DESTINATION 
environment variable + if rLogPath != "" { + if rLogFilename == "" { + rLogFilename = DefaultLogFilename + } + d := filepath.Join(rLogPath, rLogFilename) + env = AddEnv(env, rCustomEnvs, EnvVarDataSourceLogDestination, d) + } + + return env +} + +// GetExporterReplicas function retrieves replicaCount from a or provides default +func GetExporterReplicas(a *api.DatabaseObserver) int32 { + if rc := a.Spec.Replicas; rc != 0 { + return rc + } + return int32(DefaultReplicaCount) +} + +// GetExporterImage function retrieves image from a or provides default +func GetExporterImage(a *api.DatabaseObserver) string { + if img := a.Spec.Exporter.Deployment.ExporterImage; img != "" { + return img + } + + return DefaultExporterImage + +} diff --git a/commons/oci/containerdatabase.go b/commons/oci/containerdatabase.go new file mode 100644 index 00000000..9391d6f8 --- /dev/null +++ b/commons/oci/containerdatabase.go @@ -0,0 +1,101 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for 
sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package oci + +import ( + "context" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" +) + +/******************************** + * Autonomous Container Database + *******************************/ +func (d *DatabaseService) CreateAutonomousContainerDatabase(acd *dbv4.AutonomousContainerDatabase) (database.CreateAutonomousContainerDatabaseResponse, error) { + createAutonomousContainerDatabaseRequest := database.CreateAutonomousContainerDatabaseRequest{ + CreateAutonomousContainerDatabaseDetails: database.CreateAutonomousContainerDatabaseDetails{ + CompartmentId: acd.Spec.CompartmentOCID, + DisplayName: acd.Spec.DisplayName, + CloudAutonomousVmClusterId: acd.Spec.AutonomousExadataVMClusterOCID, + PatchModel: database.CreateAutonomousContainerDatabaseDetailsPatchModelUpdates, + }, + } + + return d.dbClient.CreateAutonomousContainerDatabase(context.TODO(), createAutonomousContainerDatabaseRequest) +} + +func (d *DatabaseService) 
GetAutonomousContainerDatabase(acdOCID string) (database.GetAutonomousContainerDatabaseResponse, error) { + getAutonomousContainerDatabaseRequest := database.GetAutonomousContainerDatabaseRequest{ + AutonomousContainerDatabaseId: common.String(acdOCID), + } + + return d.dbClient.GetAutonomousContainerDatabase(context.TODO(), getAutonomousContainerDatabaseRequest) +} + +func (d *DatabaseService) UpdateAutonomousContainerDatabase(acdOCID string, difACD *dbv4.AutonomousContainerDatabase) (database.UpdateAutonomousContainerDatabaseResponse, error) { + updateAutonomousContainerDatabaseRequest := database.UpdateAutonomousContainerDatabaseRequest{ + AutonomousContainerDatabaseId: common.String(acdOCID), + UpdateAutonomousContainerDatabaseDetails: database.UpdateAutonomousContainerDatabaseDetails{ + DisplayName: difACD.Spec.DisplayName, + PatchModel: database.UpdateAutonomousContainerDatabaseDetailsPatchModelEnum(difACD.Spec.PatchModel), + FreeformTags: difACD.Spec.FreeformTags, + }, + } + + return d.dbClient.UpdateAutonomousContainerDatabase(context.TODO(), updateAutonomousContainerDatabaseRequest) +} + +func (d *DatabaseService) RestartAutonomousContainerDatabase(acdOCID string) (database.RestartAutonomousContainerDatabaseResponse, error) { + restartRequest := database.RestartAutonomousContainerDatabaseRequest{ + AutonomousContainerDatabaseId: common.String(acdOCID), + } + + return d.dbClient.RestartAutonomousContainerDatabase(context.TODO(), restartRequest) +} + +func (d *DatabaseService) TerminateAutonomousContainerDatabase(acdOCID string) (database.TerminateAutonomousContainerDatabaseResponse, error) { + terminateRequest := database.TerminateAutonomousContainerDatabaseRequest{ + AutonomousContainerDatabaseId: common.String(acdOCID), + } + + return d.dbClient.TerminateAutonomousContainerDatabase(context.TODO(), terminateRequest) +} diff --git a/commons/oci/database.go b/commons/oci/database.go index 985d299b..e43afb56 100644 --- a/commons/oci/database.go +++ 
b/commons/oci/database.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,485 +40,393 @@ package oci import ( "context" - "errors" "fmt" - "math" - "time" "github.com/go-logr/logr" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" - "github.com/oracle/oci-go-sdk/v45/secrets" - "github.com/oracle/oci-go-sdk/v45/workrequests" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" "sigs.k8s.io/controller-runtime/pkg/client" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/oracle/oracle-database-operator/commons/k8s" ) -// CreateAutonomousDatabase sends a request to OCI to provision a database and returns the AutonomousDatabase OCID. 
-func CreateAutonomousDatabase(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, secretClient secrets.SecretsClient, adb *dbv1alpha1.AutonomousDatabase) (*database.CreateAutonomousDatabaseResponse, error) { - adminPassword, err := getAdminPassword(logger, kubeClient, secretClient, adb) - if err != nil { - return nil, err - } +type DatabaseService struct { + logger logr.Logger + kubeClient client.Client + dbClient database.DatabaseClient + vaultService VaultService +} - createAutonomousDatabaseDetails := database.CreateAutonomousDatabaseDetails{ - CompartmentId: adb.Spec.Details.CompartmentOCID, - DbName: adb.Spec.Details.DbName, - CpuCoreCount: adb.Spec.Details.CPUCoreCount, - DataStorageSizeInTBs: adb.Spec.Details.DataStorageSizeInTBs, - AdminPassword: common.String(adminPassword), - DisplayName: adb.Spec.Details.DisplayName, - IsAutoScalingEnabled: adb.Spec.Details.IsAutoScalingEnabled, - IsDedicated: adb.Spec.Details.IsDedicated, - DbVersion: adb.Spec.Details.DbVersion, - DbWorkload: database.CreateAutonomousDatabaseBaseDbWorkloadEnum( - adb.Spec.Details.DbWorkload), - SubnetId: adb.Spec.Details.SubnetOCID, - NsgIds: adb.Spec.Details.NsgOCIDs, - } +func NewDatabaseService( + logger logr.Logger, + kubeClient client.Client, + provider common.ConfigurationProvider) (databaseService DatabaseService, err error) { - createAutonomousDatabaseRequest := database.CreateAutonomousDatabaseRequest{ - CreateAutonomousDatabaseDetails: createAutonomousDatabaseDetails, + dbClient, err := database.NewDatabaseClientWithConfigurationProvider(provider) + if err != nil { + return databaseService, err } - resp, err := dbClient.CreateAutonomousDatabase(context.TODO(), createAutonomousDatabaseRequest) + vaultService, err := NewVaultService(logger, provider) if err != nil { - return nil, err + return databaseService, err } - return &resp, nil + return DatabaseService{ + logger: logger.WithName("dbService"), + kubeClient: kubeClient, + dbClient: dbClient, + 
vaultService: vaultService, + }, nil } -// Get the desired admin password from either Kubernetes Secret or OCI Vault Secret. -func getAdminPassword(logger logr.Logger, kubeClient client.Client, secretClient secrets.SecretsClient, adb *dbv1alpha1.AutonomousDatabase) (string, error) { - if adb.Spec.Details.AdminPassword.K8sSecretName != nil { - logger.Info(fmt.Sprintf("Getting admin password from Secret %s", *adb.Spec.Details.AdminPassword.K8sSecretName)) +/******************************** + * Autonomous Database + *******************************/ - namespacedName := types.NamespacedName{ - Namespace: adb.GetNamespace(), - Name: *adb.Spec.Details.AdminPassword.K8sSecretName, - } +// ReadPassword reads the password from passwordSpec, and returns the pointer to the read password string. +// The function returns a nil if nothing is read +func (d *DatabaseService) readPassword(namespace string, passwordSpec dbv4.PasswordSpec) (*string, error) { + logger := d.logger.WithName("readPassword") + + if passwordSpec.K8sSecret.Name != nil { + logger.Info(fmt.Sprintf("Getting password from Secret %s", *passwordSpec.K8sSecret.Name)) - key := *adb.Spec.Details.AdminPassword.K8sSecretName - adminPassword, err := getValueFromKubeSecret(kubeClient, namespacedName, key) + key := *passwordSpec.K8sSecret.Name + password, err := k8s.GetSecretValue(d.kubeClient, namespace, *passwordSpec.K8sSecret.Name, key) if err != nil { - return "", err + return nil, err } - return adminPassword, nil - } else if adb.Spec.Details.AdminPassword.OCISecretOCID != nil { - logger.Info(fmt.Sprintf("Getting admin password from OCI Vault Secret OCID %s", *adb.Spec.Details.AdminPassword.OCISecretOCID)) + return common.String(password), nil + } - adminPassword, err := getValueFromVaultSecret(secretClient, *adb.Spec.Details.AdminPassword.OCISecretOCID) + if passwordSpec.OciSecret.Id != nil { + logger.Info(fmt.Sprintf("Getting password from OCI Vault Secret OCID %s", *passwordSpec.OciSecret.Id)) + + password, err := 
d.vaultService.GetSecretValue(*passwordSpec.OciSecret.Id) if err != nil { - return "", err + return nil, err } - return adminPassword, nil + return common.String(password), nil } - return "", errors.New("should provide either AdminPasswordSecret or AdminPasswordOCID") + + return nil, nil } -func getValueFromKubeSecret(kubeClient client.Client, namespacedName types.NamespacedName, key string) (string, error) { - secret := &corev1.Secret{} - if err := kubeClient.Get(context.TODO(), namespacedName, secret); err != nil { - return "", err +func (d *DatabaseService) readACD_OCID(acd *dbv4.AcdSpec, namespace string) (*string, error) { + if acd.OciAcd.Id != nil { + return acd.OciAcd.Id, nil } - val, ok := secret.Data[key] - if !ok { - return "", errors.New("Secret key not found: " + key) + if acd.K8sAcd.Name != nil { + fetchedACD := &dbv4.AutonomousContainerDatabase{} + if err := k8s.FetchResource(d.kubeClient, namespace, *acd.K8sAcd.Name, fetchedACD); err != nil { + return nil, err + } + + return fetchedACD.Spec.AutonomousContainerDatabaseOCID, nil } - return string(val), nil + + return nil, nil } -// GetAutonomousDatabaseResource gets Autonomous Database information from a remote instance -// and return an AutonomousDatabase object -func GetAutonomousDatabaseResource(logger logr.Logger, dbClient database.DatabaseClient, adb *dbv1alpha1.AutonomousDatabase) (*dbv1alpha1.AutonomousDatabase, error) { - getAutonomousDatabaseRequest := database.GetAutonomousDatabaseRequest{ - AutonomousDatabaseId: adb.Spec.Details.AutonomousDatabaseOCID, +// CreateAutonomousDatabase sends a request to OCI to provision a database and returns the AutonomousDatabase OCID. 
+func (d *DatabaseService) CreateAutonomousDatabase(adb *dbv4.AutonomousDatabase) (resp database.CreateAutonomousDatabaseResponse, err error) { + adminPassword, err := d.readPassword(adb.Namespace, adb.Spec.Details.AdminPassword) + if err != nil { + return resp, err } - response, err := dbClient.GetAutonomousDatabase(context.TODO(), getAutonomousDatabaseRequest) + acdOCID, err := d.readACD_OCID(&adb.Spec.Details.AutonomousContainerDatabase, adb.Namespace) if err != nil { - return nil, err + return resp, err } - returnedADB := adb.UpdateAttrFromOCIAutonomousDatabase(response.AutonomousDatabase) - - logger.Info("Get information from remote AutonomousDatabase successfully") - return returnedADB, nil -} - -// isAttrChanged checks if the values of last successful object and current object are different. -// The function returns false if the types are mismatch or unknown. -// The function returns false if the current object has zero value (not applicable for boolean type). -func isAttrChanged(lastSucObj interface{}, curObj interface{}) bool { - switch curObj.(type) { - case string: // Enum - // type check - lastSucString, ok := lastSucObj.(string) - if !ok { - return false - } - curString := curObj.(string) - - if curString != "" && (lastSucString != curString) { - return true - } - case *int: - // type check - lastSucIntPtr, ok := lastSucObj.(*int) - if !ok { - return false - } - curIntPtr, ok := curObj.(*int) - - if lastSucIntPtr != nil && curIntPtr != nil && *curIntPtr != 0 && *lastSucIntPtr != *curIntPtr { - return true - } - case *string: - // type check - lastSucStringPtr, ok := lastSucObj.(*string) - if !ok { - return false - } - curStringPtr := curObj.(*string) - - if lastSucStringPtr != nil && curStringPtr != nil && *curStringPtr != "" && *lastSucStringPtr != *curStringPtr { - return true - } - case *bool: - // type check - lastSucBoolPtr, ok := lastSucObj.(*bool) - if !ok { - return false - } - curBoolPtr := curObj.(*bool) + createAutonomousDatabaseDetails := 
database.CreateAutonomousDatabaseDetails{ + CompartmentId: adb.Spec.Details.CompartmentId, + DbName: adb.Spec.Details.DbName, + CpuCoreCount: adb.Spec.Details.CpuCoreCount, + ComputeModel: database.CreateAutonomousDatabaseBaseComputeModelEnum(adb.Spec.Details.ComputeModel), + ComputeCount: adb.Spec.Details.ComputeCount, + OcpuCount: adb.Spec.Details.OcpuCount, + DataStorageSizeInTBs: adb.Spec.Details.DataStorageSizeInTBs, + AdminPassword: adminPassword, + DisplayName: adb.Spec.Details.DisplayName, + IsAutoScalingEnabled: adb.Spec.Details.IsAutoScalingEnabled, + IsDedicated: adb.Spec.Details.IsDedicated, + AutonomousContainerDatabaseId: acdOCID, + DbVersion: adb.Spec.Details.DbVersion, + DbWorkload: database.CreateAutonomousDatabaseBaseDbWorkloadEnum(adb.Spec.Details.DbWorkload), + LicenseModel: database.CreateAutonomousDatabaseBaseLicenseModelEnum(adb.Spec.Details.LicenseModel), + IsFreeTier: adb.Spec.Details.IsFreeTier, + IsAccessControlEnabled: adb.Spec.Details.IsAccessControlEnabled, + WhitelistedIps: adb.Spec.Details.WhitelistedIps, + IsMtlsConnectionRequired: adb.Spec.Details.IsMtlsConnectionRequired, + SubnetId: adb.Spec.Details.SubnetId, + NsgIds: adb.Spec.Details.NsgIds, + PrivateEndpointLabel: adb.Spec.Details.PrivateEndpointLabel, + + FreeformTags: adb.Spec.Details.FreeformTags, + } + + retryPolicy := common.DefaultRetryPolicy() - // For boolean type, we don't have to check zero value - if lastSucBoolPtr != nil && curBoolPtr != nil && *lastSucBoolPtr != *curBoolPtr { - return true - } - case []string: - // type check - lastSucSlice, ok := lastSucObj.([]string) - if !ok { - return false - } + createAutonomousDatabaseRequest := database.CreateAutonomousDatabaseRequest{ + CreateAutonomousDatabaseDetails: createAutonomousDatabaseDetails, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, + } - curSlice := curObj.([]string) - if curSlice == nil { - return false - } else if len(lastSucSlice) != len(curSlice) { - return true - } + resp, 
err = d.dbClient.CreateAutonomousDatabase(context.TODO(), createAutonomousDatabaseRequest) + if err != nil { + return resp, err + } - for i, v := range lastSucSlice { - if v != curSlice[i] { - return true - } - } - case map[string]string: - // type check - lastSucMap, ok := lastSucObj.(map[string]string) - if !ok { - return false - } + return resp, nil +} - curMap := curObj.(map[string]string) - if curMap == nil { - return false - } else if len(lastSucMap) != len(curMap) { - return true - } +func (d *DatabaseService) GetAutonomousDatabase(adbOCID string) (database.GetAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - for k, v := range lastSucMap { - if w, ok := curMap[k]; !ok || v != w { - return true - } - } + getAutonomousDatabaseRequest := database.GetAutonomousDatabaseRequest{ + AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - return false + + return d.dbClient.GetAutonomousDatabase(context.TODO(), getAutonomousDatabaseRequest) } -// UpdateGeneralAndPasswordAttributes updates the general and password attributes of the Autonomous Database. -// Based on the responses from OCI calls, we can split the attributes into the following five categories. -// AutonomousDatabaseOCID, CompartmentOCID, IsDedicated, and LifecycleState are excluded since they not applicable in updateAutonomousDatabaseRequest. -// Except for category 1, category 2 and 3 cannot be updated at the same time, i.e.,we can at most update category 1 plus another category 2,or 3. -// 1. General attribute: including DisplayName, DbName, DbWorkload, DbVersion, freeformTags, subnetOCID, nsgOCIDs, and whitelistedIPs. The general attributes can be updated together with one of the other categories in the same request. -// 2. Scale attribute: includining IsAutoScalingEnabled, CpuCoreCount and DataStorageSizeInTBs. -// 3. 
Password attribute: including AdminPasswordSecret and AdminPasswordOCID -// From the above rules, we group general and password attributes and send the update together in the same request, and then send the scale update in another request. -func UpdateGeneralAndPasswordAttributes(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, - secretClient secrets.SecretsClient, curADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - var shouldSendRequest = false - - lastSucSpec, err := curADB.GetLastSuccessfulSpec() +func (d *DatabaseService) UpdateAutonomousDatabase(adbOCID string, adb *dbv4.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { + // Retrieve admin password + adminPassword, err := d.readPassword(adb.Namespace, adb.Spec.Details.AdminPassword) if err != nil { return resp, err } - // Prepare the update request - updateAutonomousDatabaseDetails := database.UpdateAutonomousDatabaseDetails{} + retryPolicy := common.DefaultRetryPolicy() - if isAttrChanged(lastSucSpec.Details.DisplayName, curADB.Spec.Details.DisplayName) { - updateAutonomousDatabaseDetails.DisplayName = curADB.Spec.Details.DisplayName - shouldSendRequest = true - } - if isAttrChanged(lastSucSpec.Details.DbName, curADB.Spec.Details.DbName) { - updateAutonomousDatabaseDetails.DbName = curADB.Spec.Details.DbName - shouldSendRequest = true - } - if isAttrChanged(lastSucSpec.Details.DbWorkload, curADB.Spec.Details.DbWorkload) { - updateAutonomousDatabaseDetails.DbWorkload = database.UpdateAutonomousDatabaseDetailsDbWorkloadEnum(curADB.Spec.Details.DbWorkload) - shouldSendRequest = true - } - if isAttrChanged(lastSucSpec.Details.DbVersion, curADB.Spec.Details.DbVersion) { - updateAutonomousDatabaseDetails.DbVersion = curADB.Spec.Details.DbVersion - shouldSendRequest = true + updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ + AutonomousDatabaseId: common.String(adbOCID), + 
UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ + DisplayName: adb.Spec.Details.DisplayName, + DbName: adb.Spec.Details.DbName, + DbVersion: adb.Spec.Details.DbVersion, + FreeformTags: adb.Spec.Details.FreeformTags, + DbWorkload: database.UpdateAutonomousDatabaseDetailsDbWorkloadEnum(adb.Spec.Details.DbWorkload), + LicenseModel: database.UpdateAutonomousDatabaseDetailsLicenseModelEnum(adb.Spec.Details.LicenseModel), + AdminPassword: adminPassword, + DataStorageSizeInTBs: adb.Spec.Details.DataStorageSizeInTBs, + CpuCoreCount: adb.Spec.Details.CpuCoreCount, + ComputeModel: database.UpdateAutonomousDatabaseDetailsComputeModelEnum(adb.Spec.Details.ComputeModel), + ComputeCount: adb.Spec.Details.ComputeCount, + OcpuCount: adb.Spec.Details.OcpuCount, + IsAutoScalingEnabled: adb.Spec.Details.IsAutoScalingEnabled, + IsFreeTier: adb.Spec.Details.IsFreeTier, + IsMtlsConnectionRequired: adb.Spec.Details.IsMtlsConnectionRequired, + IsAccessControlEnabled: adb.Spec.Details.IsAccessControlEnabled, + WhitelistedIps: adb.Spec.Details.WhitelistedIps, + SubnetId: adb.Spec.Details.SubnetId, + NsgIds: adb.Spec.Details.NsgIds, + PrivateEndpointLabel: adb.Spec.Details.PrivateEndpointLabel, + }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } + return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) +} - if isAttrChanged(lastSucSpec.Details.FreeformTags, curADB.Spec.Details.FreeformTags) { - updateAutonomousDatabaseDetails.FreeformTags = curADB.Spec.Details.FreeformTags - shouldSendRequest = true - } +func (d *DatabaseService) StartAutonomousDatabase(adbOCID string) (database.StartAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - if isAttrChanged(lastSucSpec.Details.FreeformTags, curADB.Spec.Details.FreeformTags) { - updateAutonomousDatabaseDetails.FreeformTags = curADB.Spec.Details.FreeformTags - shouldSendRequest = true - } - if 
isAttrChanged(lastSucSpec.Details.SubnetOCID, curADB.Spec.Details.SubnetOCID) { - updateAutonomousDatabaseDetails.SubnetId = curADB.Spec.Details.SubnetOCID - shouldSendRequest = true - } - if isAttrChanged(lastSucSpec.Details.NsgOCIDs, curADB.Spec.Details.NsgOCIDs) { - updateAutonomousDatabaseDetails.NsgIds = curADB.Spec.Details.NsgOCIDs - shouldSendRequest = true + startRequest := database.StartAutonomousDatabaseRequest{ + AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - if isAttrChanged(lastSucSpec.Details.AdminPassword.K8sSecretName, curADB.Spec.Details.AdminPassword.K8sSecretName) || - isAttrChanged(lastSucSpec.Details.AdminPassword.OCISecretOCID, curADB.Spec.Details.AdminPassword.OCISecretOCID) { - // Get the adminPassword - var adminPassword string + return d.dbClient.StartAutonomousDatabase(context.TODO(), startRequest) +} - adminPassword, err = getAdminPassword(logger, kubeClient, secretClient, curADB) - if err != nil { - return - } - updateAutonomousDatabaseDetails.AdminPassword = common.String(adminPassword) +func (d *DatabaseService) StopAutonomousDatabase(adbOCID string) (database.StopAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - shouldSendRequest = true + stopRequest := database.StopAutonomousDatabaseRequest{ + AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - // Send the request only when something changes - if shouldSendRequest { - - logger.Info("Sending general attributes and ADMIN password update request") + return d.dbClient.StopAutonomousDatabase(context.TODO(), stopRequest) +} - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - // AutonomousDatabaseId: common.String(curADB.Spec.Details.AutonomousDatabaseOCID), - AutonomousDatabaseId: curADB.Spec.Details.AutonomousDatabaseOCID, - UpdateAutonomousDatabaseDetails: 
updateAutonomousDatabaseDetails, - } +func (d *DatabaseService) DeleteAutonomousDatabase(adbOCID string) (database.DeleteAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - resp, err = dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) + deleteRequest := database.DeleteAutonomousDatabaseRequest{ + AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - return + return d.dbClient.DeleteAutonomousDatabase(context.TODO(), deleteRequest) } -// UpdateScaleAttributes updates the scale attributes of the Autonomous Database -// Refer to UpdateGeneralAndPasswordAttributes for more details about how and why we separate the attributes in different calls. -func UpdateScaleAttributes(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, - curADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - var shouldSendRequest = false - - lastSucSpec, err := curADB.GetLastSuccessfulSpec() +func (d *DatabaseService) DownloadWallet(adb *dbv4.AutonomousDatabase) (resp database.GenerateAutonomousDatabaseWalletResponse, err error) { + // Prepare wallet password + walletPassword, err := d.readPassword(adb.Namespace, adb.Spec.Wallet.Password) if err != nil { return resp, err } - // Prepare the update request - updateAutonomousDatabaseDetails := database.UpdateAutonomousDatabaseDetails{} - - if isAttrChanged(lastSucSpec.Details.DataStorageSizeInTBs, curADB.Spec.Details.DataStorageSizeInTBs) { - updateAutonomousDatabaseDetails.DataStorageSizeInTBs = curADB.Spec.Details.DataStorageSizeInTBs - shouldSendRequest = true - } - if isAttrChanged(lastSucSpec.Details.CPUCoreCount, curADB.Spec.Details.CPUCoreCount) { - updateAutonomousDatabaseDetails.CpuCoreCount = curADB.Spec.Details.CPUCoreCount - shouldSendRequest = true - } - if isAttrChanged(lastSucSpec.Details.IsAutoScalingEnabled, 
curADB.Spec.Details.IsAutoScalingEnabled) { - updateAutonomousDatabaseDetails.IsAutoScalingEnabled = curADB.Spec.Details.IsAutoScalingEnabled - shouldSendRequest = true - } - - // Don't send the request if nothing is changed - if shouldSendRequest { - - logger.Info("Sending scale attributes update request") + retryPolicy := common.DefaultRetryPolicy() - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - // AutonomousDatabaseId: common.String(curADB.Spec.Details.AutonomousDatabaseOCID), - AutonomousDatabaseId: curADB.Spec.Details.AutonomousDatabaseOCID, - UpdateAutonomousDatabaseDetails: updateAutonomousDatabaseDetails, - } - - resp, err = dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) + // Download a Wallet + req := database.GenerateAutonomousDatabaseWalletRequest{ + AutonomousDatabaseId: adb.Spec.Details.Id, + GenerateAutonomousDatabaseWalletDetails: database.GenerateAutonomousDatabaseWalletDetails{ + Password: walletPassword, + }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - return -} - -// SetAutonomousDatabaseLifecycleState starts or stops AutonomousDatabase in OCI based on the LifeCycleState attribute -func SetAutonomousDatabaseLifecycleState(logger logr.Logger, dbClient database.DatabaseClient, adb *dbv1alpha1.AutonomousDatabase) (resp interface{}, err error) { - lastSucSpec, err := adb.GetLastSuccessfulSpec() + // Send the request using the service client + resp, err = d.dbClient.GenerateAutonomousDatabaseWallet(context.TODO(), req) if err != nil { return resp, err } - // Return if the desired lifecycle state is the same as the current lifecycle state - if adb.Spec.Details.LifecycleState == lastSucSpec.Details.LifecycleState { - return nil, nil - } - - switch string(adb.Spec.Details.LifecycleState) { - case string(database.AutonomousDatabaseLifecycleStateAvailable): - logger.Info("Sending start request to the Autonomous Database " + *adb.Spec.Details.DbName) - 
- resp, err = startAutonomousDatabase(dbClient, *adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return - } - - case string(database.AutonomousDatabaseLifecycleStateStopped): - logger.Info("Sending stop request to the Autonomous Database " + *adb.Spec.Details.DbName) - - resp, err = stopAutonomousDatabase(dbClient, *adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return - } - - case string(database.AutonomousDatabaseLifecycleStateTerminated): - // Special case. - if adb.Spec.Details.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminating { - break - } - logger.Info("Sending teminate request to the Autonomous Database " + *adb.Spec.Details.DbName) - - resp, err = DeleteAutonomousDatabase(dbClient, *adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return - } - - default: - err = fmt.Errorf("invalid lifecycleState value: currently the operator only accept %s, %s and %s as the value of the lifecycleState parameter", - database.AutonomousDatabaseLifecycleStateAvailable, - database.AutonomousDatabaseLifecycleStateStopped, - database.AutonomousDatabaseLifecycleStateTerminated) - } - - return + return resp, nil } -// startAutonomousDatabase starts an Autonomous Database in OCI -func startAutonomousDatabase(dbClient database.DatabaseClient, adbOCID string) (resp database.StartAutonomousDatabaseResponse, err error) { - startRequest := database.StartAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - } +/******************************** + * Autonomous Database Restore + *******************************/ - resp, err = dbClient.StartAutonomousDatabase(context.Background(), startRequest) - return -} +func (d *DatabaseService) RestoreAutonomousDatabase(adbOCID string, sdkTime common.SDKTime) (database.RestoreAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() -// stopAutonomousDatabase stops an Autonomous Database in OCI -func stopAutonomousDatabase(dbClient 
database.DatabaseClient, adbOCID string) (resp database.StopAutonomousDatabaseResponse, err error) { - stopRequest := database.StopAutonomousDatabaseRequest{ + request := database.RestoreAutonomousDatabaseRequest{ AutonomousDatabaseId: common.String(adbOCID), + RestoreAutonomousDatabaseDetails: database.RestoreAutonomousDatabaseDetails{ + Timestamp: &sdkTime, + }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - - resp, err = dbClient.StopAutonomousDatabase(context.Background(), stopRequest) - return + return d.dbClient.RestoreAutonomousDatabase(context.TODO(), request) } -// DeleteAutonomousDatabase terminates an Autonomous Database in OCI -func DeleteAutonomousDatabase(dbClient database.DatabaseClient, adbOCID string) (resp database.DeleteAutonomousDatabaseResponse, err error) { +/******************************** + * Autonomous Database Backup + *******************************/ - deleteRequest := database.DeleteAutonomousDatabaseRequest{ +func (d *DatabaseService) ListAutonomousDatabaseBackups(adbOCID string) (database.ListAutonomousDatabaseBackupsResponse, error) { + retryPolicy := common.DefaultRetryPolicy() + + listBackupRequest := database.ListAutonomousDatabaseBackupsRequest{ AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - resp, err = dbClient.DeleteAutonomousDatabase(context.Background(), deleteRequest) - return + return d.dbClient.ListAutonomousDatabaseBackups(context.TODO(), listBackupRequest) } -func WaitUntilWorkCompleted(logger logr.Logger, workClient workrequests.WorkRequestClient, opcWorkRequestID *string) error { - if opcWorkRequestID == nil { - return nil - } - - logger.Info("Waiting for the work request to finish. 
opcWorkRequestID = " + *opcWorkRequestID) +func (d *DatabaseService) CreateAutonomousDatabaseBackup(adbBackup *dbv4.AutonomousDatabaseBackup, adbOCID string) (database.CreateAutonomousDatabaseBackupResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - retryPolicy := getCompleteWorkRetryPolicy() - // Apply wait until work complete retryPolicy - workRequest := workrequests.GetWorkRequestRequest{ - WorkRequestId: opcWorkRequestID, + createBackupRequest := database.CreateAutonomousDatabaseBackupRequest{ + CreateAutonomousDatabaseBackupDetails: database.CreateAutonomousDatabaseBackupDetails{ + AutonomousDatabaseId: common.String(adbOCID), + IsLongTermBackup: adbBackup.Spec.IsLongTermBackup, + RetentionPeriodInDays: adbBackup.Spec.RetentionPeriodInDays, + }, RequestMetadata: common.RequestMetadata{ RetryPolicy: &retryPolicy, }, } - // GetWorkRequest retries until the work status is SUCCEEDED - if _, err := workClient.GetWorkRequest(context.TODO(), workRequest); err != nil { - return err + // Use the spec.displayName as the displayName of the backup if is provided, + // otherwise use the resource name as the displayName. + if adbBackup.Spec.DisplayName != nil { + createBackupRequest.DisplayName = adbBackup.Spec.DisplayName + } else { + createBackupRequest.DisplayName = common.String(adbBackup.GetName()) } - return nil + return d.dbClient.CreateAutonomousDatabaseBackup(context.TODO(), createBackupRequest) } -func getCompleteWorkRetryPolicy() common.RetryPolicy { - shouldRetry := func(r common.OCIOperationResponse) bool { - if _, isServiceError := common.IsServiceError(r.Error); isServiceError { - // Don't retry if it's service error. Sometimes it could be network error or other errors which prevents - // request send to server; we do the retry in these cases. 
- return false - } - - if converted, ok := r.Response.(workrequests.GetWorkRequestResponse); ok { - // do the retry until WorkReqeut Status is Succeeded - ignore case (BMI-2652) - return converted.Status != workrequests.WorkRequestStatusSucceeded - } +func (d *DatabaseService) GetAutonomousDatabaseBackup(backupOCID string) (database.GetAutonomousDatabaseBackupResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - return true + getBackupRequest := database.GetAutonomousDatabaseBackupRequest{ + AutonomousDatabaseBackupId: common.String(backupOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - return getRetryPolicy(shouldRetry) + return d.dbClient.GetAutonomousDatabaseBackup(context.TODO(), getBackupRequest) } -func getConflictRetryPolicy() common.RetryPolicy { - // retry for 409 conflict status code - shouldRetry := func(r common.OCIOperationResponse) bool { - return r.Error != nil && r.Response.HTTPResponse().StatusCode == 409 +func (d *DatabaseService) CreateAutonomousDatabaseClone(adb *dbv4.AutonomousDatabase) (resp database.CreateAutonomousDatabaseResponse, err error) { + adminPassword, err := d.readPassword(adb.Namespace, adb.Spec.Clone.AdminPassword) + if err != nil { + return resp, err } - return getRetryPolicy(shouldRetry) -} - -func getRetryPolicy(retryOperation func(common.OCIOperationResponse) bool) common.RetryPolicy { - // maximum times of retry - attempts := uint(10) + acdOCID, err := d.readACD_OCID(&adb.Spec.Clone.AutonomousContainerDatabase, adb.Namespace) + if err != nil { + return resp, err + } - nextDuration := func(r common.OCIOperationResponse) time.Duration { - // you might want wait longer for next retry when your previous one failed - // this function will return the duration as: - // 1s, 2s, 4s, 8s, 16s, 32s, 64s etc... 
- return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second + retryPolicy := common.DefaultRetryPolicy() + request := database.CreateAutonomousDatabaseRequest{ + CreateAutonomousDatabaseDetails: database.CreateAutonomousDatabaseCloneDetails{ + CompartmentId: adb.Spec.Clone.CompartmentId, + SourceId: adb.Spec.Details.Id, + AutonomousContainerDatabaseId: acdOCID, + DisplayName: adb.Spec.Clone.DisplayName, + DbName: adb.Spec.Clone.DbName, + DbWorkload: database.CreateAutonomousDatabaseBaseDbWorkloadEnum(adb.Spec.Clone.DbWorkload), + LicenseModel: database.CreateAutonomousDatabaseBaseLicenseModelEnum(adb.Spec.Clone.LicenseModel), + DbVersion: adb.Spec.Clone.DbVersion, + DataStorageSizeInTBs: adb.Spec.Clone.DataStorageSizeInTBs, + CpuCoreCount: adb.Spec.Clone.CpuCoreCount, + ComputeModel: database.CreateAutonomousDatabaseBaseComputeModelEnum(adb.Spec.Clone.ComputeModel), + ComputeCount: adb.Spec.Clone.ComputeCount, + OcpuCount: adb.Spec.Clone.OcpuCount, + AdminPassword: adminPassword, + IsAutoScalingEnabled: adb.Spec.Clone.IsAutoScalingEnabled, + IsDedicated: adb.Spec.Clone.IsDedicated, + IsFreeTier: adb.Spec.Clone.IsFreeTier, + IsAccessControlEnabled: adb.Spec.Clone.IsAccessControlEnabled, + WhitelistedIps: adb.Spec.Clone.WhitelistedIps, + SubnetId: adb.Spec.Clone.SubnetId, + NsgIds: adb.Spec.Clone.NsgIds, + PrivateEndpointLabel: adb.Spec.Clone.PrivateEndpointLabel, + IsMtlsConnectionRequired: adb.Spec.Clone.IsMtlsConnectionRequired, + FreeformTags: adb.Spec.Clone.FreeformTags, + CloneType: adb.Spec.Clone.CloneType, + }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } - return common.NewRetryPolicy(attempts, retryOperation, nextDuration) + return d.dbClient.CreateAutonomousDatabase(context.TODO(), request) } diff --git a/commons/oci/provider.go b/commons/oci/provider.go index f5bd6cda..f466f226 100644 --- a/commons/oci/provider.go +++ b/commons/oci/provider.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle 
and/or its affiliates. +** Copyright (c) 2022, 2024 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -39,16 +39,16 @@ package oci import ( - "context" "errors" + "fmt" + "os" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/common/auth" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/common/auth" "sigs.k8s.io/controller-runtime/pkg/client" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" + "github.com/oracle/oracle-database-operator/commons/k8s" ) const ( @@ -60,14 +60,16 @@ const ( privatekeyKey = "privatekey" ) -type APIKeyAuth struct { +type ApiKeyAuth struct { ConfigMapName *string SecretName *string Namespace string } -func GetOCIProvider(kubeClient client.Client, authData APIKeyAuth) (common.ConfigurationProvider, error) { - if authData.ConfigMapName != nil && authData.SecretName != nil { +func GetOciProvider(kubeClient client.Client, authData ApiKeyAuth) (common.ConfigurationProvider, error) { + if authData.ConfigMapName != nil && authData.SecretName == nil { + return getWorkloadIdentityProvider(kubeClient, authData) + } else if authData.ConfigMapName != nil && authData.SecretName != nil { provider, err := getProviderWithAPIKey(kubeClient, authData) if err != nil { return nil, err @@ -77,21 +79,41 @@ func GetOCIProvider(kubeClient client.Client, authData APIKeyAuth) (common.Confi } else if authData.ConfigMapName == nil && authData.SecretName == nil { return auth.InstancePrincipalConfigurationProvider() } else { - return nil, errors.New("You have to provide both the OCI ConfigMap and the privateKey to authorize with API signing key, " + - "or leave them both empty to authorize with Instance Principal. 
Check if the spec configuration is correct.") + return nil, errors.New("both the OCI ConfigMap and the privateKey are required to authorize with API signing key; " + + "leave them both empty to authorize with Instance Principal") } } -func getProviderWithAPIKey(kubeClient client.Client, authData APIKeyAuth) (common.ConfigurationProvider, error) { - var region, fingerprint, user, tenancy, passphrase, privatekeyValue string +func getWorkloadIdentityProvider(kubeClient client.Client, authData ApiKeyAuth) (common.ConfigurationProvider, error) { + ociConfigMap, err := k8s.FetchConfigMap(kubeClient, authData.Namespace, *authData.ConfigMapName) + if err != nil { + return nil, err + } + // Ensure configmap is set with proper data + if len(ociConfigMap.Data) == 0 { + return nil, fmt.Errorf("OCI ConfigMap %s has no data", ociConfigMap.Name) + } + region, ok := ociConfigMap.Data[regionKey] + if !ok || len(region) == 0 { + return nil, fmt.Errorf("OCI Region Key %s missing from OCI ConfigMap %s", regionKey, ociConfigMap.Name) + } + // OCI SDK requires specific, dynamic environment variables for workload identity. 
+ if err = os.Setenv(auth.ResourcePrincipalVersionEnvVar, auth.ResourcePrincipalVersion2_2); err != nil { - // Read ConfigMap - configMapNamespacedName := types.NamespacedName{ - Namespace: authData.Namespace, - Name: *authData.ConfigMapName, + return nil, fmt.Errorf("unable to set OCI SDK environment variable %s: %v", auth.ResourcePrincipalVersionEnvVar, err) } - ociConfigMap := &corev1.ConfigMap{} - if err := kubeClient.Get(context.TODO(), configMapNamespacedName, ociConfigMap); err != nil { + if err = os.Setenv(auth.ResourcePrincipalRegionEnvVar, region); err != nil { + return nil, fmt.Errorf("unable to set OCI SDK environment variable %s: %v", auth.ResourcePrincipalRegionEnvVar, err) + } + return auth.OkeWorkloadIdentityConfigurationProvider() +} + +func getProviderWithAPIKey(kubeClient client.Client, authData ApiKeyAuth) (common.ConfigurationProvider, error) { + var region, fingerprint, user, tenancy, passphrase, privatekeyValue string + + // Prepare ConfigMap + ociConfigMap, err := k8s.FetchConfigMap(kubeClient, authData.Namespace, *authData.ConfigMapName) + if err != nil { return nil, err } @@ -111,24 +133,11 @@ func getProviderWithAPIKey(kubeClient client.Client, authData APIKeyAuth) (commo } } - // Read Secret - secretNamespacedName := types.NamespacedName{ - Namespace: authData.Namespace, - Name: *authData.SecretName, - } - - privatekeySecret := &corev1.Secret{} - if err := kubeClient.Get(context.TODO(), secretNamespacedName, privatekeySecret); err != nil { + // Prepare privatekey value + privatekeyValue, err = k8s.GetSecretValue(kubeClient, authData.Namespace, *authData.SecretName, privatekeyKey) + if err != nil { return nil, err } - for key, val := range privatekeySecret.Data { - if key == privatekeyKey { - privatekeyValue = string(val) - } else { - return nil, errors.New("Unable to identify the key: " + key) - } - } - return common.NewRawConfigurationProvider(tenancy, user, region, fingerprint, privatekeyValue, &passphrase), nil } diff --git 
a/commons/oci/vault.go b/commons/oci/vault.go index 6bf790aa..d7069a88 100644 --- a/commons/oci/vault.go +++ b/commons/oci/vault.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -42,16 +42,41 @@ import ( "context" "encoding/base64" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/secrets" + "github.com/go-logr/logr" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/secrets" ) -func getValueFromVaultSecret(secretClient secrets.SecretsClient, vaultSecretOCID string) (string, error) { +type VaultService interface { + GetSecretValue(vaultSecretOCID string) (string, error) +} + +type vaultService struct { + logger logr.Logger + secretClient secrets.SecretsClient +} + +func NewVaultService( + logger logr.Logger, + provider common.ConfigurationProvider) (VaultService, error) { + + secretClient, err := secrets.NewSecretsClientWithConfigurationProvider(provider) + if err != nil { + return nil, err + } + + return &vaultService{ + logger: logger.WithName("vaultService"), + secretClient: secretClient, + }, nil +} + +func (v *vaultService) GetSecretValue(vaultSecretOCID string) (string, error) { request := secrets.GetSecretBundleRequest{ SecretId: common.String(vaultSecretOCID), } - response, err := secretClient.GetSecretBundle(context.TODO(), request) + response, err := v.secretClient.GetSecretBundle(context.TODO(), request) if err != nil { return "", err } diff --git a/commons/oci/wallet.go b/commons/oci/wallet.go index 3faa4f16..076460b1 100644 --- a/commons/oci/wallet.go +++ b/commons/oci/wallet.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,142 +40,71 @@ package oci import ( "archive/zip" - "context" - "errors" - "fmt" "io" "io/ioutil" - "math" - "time" - - "github.com/go-logr/logr" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" - "github.com/oracle/oci-go-sdk/v45/secrets" - "sigs.k8s.io/controller-runtime/pkg/client" - - "k8s.io/apimachinery/pkg/types" - - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + "strings" ) -// GetWallet downloads the wallet using the given information in the AutonomousDatabase object. -// The function then unzips the wallet and returns a map object which holds the byte values of the unzipped files. -func GetWallet(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, secretClient secrets.SecretsClient, adb *dbv1alpha1.AutonomousDatabase) (map[string][]byte, error) { - // Get the wallet password from Secret then Vault Secret - walletPassword, err := getWalletPassword(logger, kubeClient, secretClient, adb) - if err != nil { - return nil, err - } - - // Request to download a wallet with the given password - resp, err := generateAutonomousDatabaseWallet(dbClient, *adb.Spec.Details.AutonomousDatabaseOCID, walletPassword) +// ExtractWallet extracts the wallet and returns a map object which holds the byte values of the unzipped files. 
+func ExtractWallet(content io.ReadCloser) (map[string][]byte, error) { + path, err := saveWalletZip(content) if err != nil { return nil, err } - // Unzip the file - outZip, err := ioutil.TempFile("", "wallet*.zip") - if err != nil { - return nil, err - } - defer outZip.Close() - - if _, err := io.Copy(outZip, resp.Content); err != nil { - return nil, err - } - - data, err := unzipWallet(outZip.Name()) + data, err := unzipWallet(path) if err != nil { return nil, err } return data, nil } -func getWalletPassword(logger logr.Logger, kubeClient client.Client, secretClient secrets.SecretsClient, adb *dbv1alpha1.AutonomousDatabase) (string, error) { - if adb.Spec.Details.Wallet.Password.K8sSecretName != nil { - logger.Info(fmt.Sprintf("Getting wallet password from Secret %s", *adb.Spec.Details.Wallet.Password.K8sSecretName)) - - namespacedName := types.NamespacedName{ - Namespace: adb.GetNamespace(), - Name: *adb.Spec.Details.Wallet.Password.K8sSecretName, - } - - key := *adb.Spec.Details.Wallet.Password.K8sSecretName - walletPassword, err := getValueFromKubeSecret(kubeClient, namespacedName, key) - if err != nil { - return "", err - } - return walletPassword, nil - - } else if adb.Spec.Details.Wallet.Password.OCISecretOCID != nil { - logger.Info(fmt.Sprintf("Getting wallet password from OCI Vault Secret OCID %s", *adb.Spec.Details.Wallet.Password.OCISecretOCID)) - - walletPassword, err := getValueFromVaultSecret(secretClient, *adb.Spec.Details.Wallet.Password.OCISecretOCID) - if err != nil { - return "", err - } - return walletPassword, nil - } - return "", errors.New("should provide either InstancewalletPasswordSecret or a InstancewalletPasswordId") -} - -func generateAutonomousDatabaseWallet(dbClient database.DatabaseClient, adbOCID string, walletPassword string) (database.GenerateAutonomousDatabaseWalletResponse, error) { - - // maximum times of retry - attempts := uint(10) - - // retry for all non-200 status code - retryOnAllNon200ResponseCodes := func(r 
common.OCIOperationResponse) bool { - return !(r.Error == nil && 199 < r.Response.HTTPResponse().StatusCode && r.Response.HTTPResponse().StatusCode < 300) - } - - nextDuration := func(r common.OCIOperationResponse) time.Duration { - // Wait longer for next retry when your previous one failed - // this function will return the duration as: - // 1s, 2s, 4s, 8s, 16s, 32s, 64s etc... - return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second +func saveWalletZip(content io.ReadCloser) (string, error) { + // Create a temp file wallet*.zip + const walletFileName = "wallet*.zip" + outZip, err := ioutil.TempFile("", walletFileName) + if err != nil { + return "", err } + defer outZip.Close() - walletRetryPolicy := common.NewRetryPolicy(attempts, retryOnAllNon200ResponseCodes, nextDuration) - - // Download a Wallet - req := database.GenerateAutonomousDatabaseWalletRequest{ - AutonomousDatabaseId: common.String(adbOCID), - GenerateAutonomousDatabaseWalletDetails: database.GenerateAutonomousDatabaseWalletDetails{ - Password: common.String(walletPassword), - }, - RequestMetadata: common.RequestMetadata{ - RetryPolicy: &walletRetryPolicy, - }, + // Save the wallet in wallet*.zip + if _, err := io.Copy(outZip, content); err != nil { + return "", err } - // Send the request using the service client - return dbClient.GenerateAutonomousDatabaseWallet(context.TODO(), req) + return outZip.Name(), nil } -func unzipWallet(filename string) (map[string][]byte, error) { - data := map[string][]byte{} +func unzipWallet(path string) (map[string][]byte, error) { + files := map[string][]byte{} - reader, err := zip.OpenReader(filename) + reader, err := zip.OpenReader(path) if err != nil { - return data, err + return files, err } defer reader.Close() for _, file := range reader.File { reader, err := file.Open() if err != nil { - return data, err + return files, err } content, err := ioutil.ReadAll(reader) if err != nil { - return data, err + return files, err } - 
data[file.Name] = content + files[file.Name] = content } - return data, nil + return files, nil +} + +func WalletExpiringDate(files map[string][]byte) string { + data := string(files["README"]) + + line := data[strings.Index(data, "this wallet will expire on"):strings.Index(data, ".\nIn order to avoid")] + return strings.TrimSpace(strings.TrimPrefix(line, "this wallet will expire on")) } diff --git a/commons/oci/workrequest.go b/commons/oci/workrequest.go new file mode 100644 index 00000000..f68d2766 --- /dev/null +++ b/commons/oci/workrequest.go @@ -0,0 +1,102 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package oci + +import ( + "context" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/workrequests" +) + +type WorkRequestService interface { + Get(opcWorkRequestID string) (workrequests.GetWorkRequestResponse, error) + List(compartmentID string, resourceID string) (workrequests.ListWorkRequestsResponse, error) +} + +type workRequestService struct { + logger logr.Logger + workClient workrequests.WorkRequestClient +} + +func NewWorkRequestService( + logger logr.Logger, + kubeClient client.Client, + provider common.ConfigurationProvider) (WorkRequestService, error) { + + workClient, err := workrequests.NewWorkRequestClientWithConfigurationProvider(provider) + if err != nil { + return nil, err + } + + return &workRequestService{ + logger: logger.WithName("workRequestService"), + workClient: workClient, + }, nil +} + +func (w *workRequestService) Get(opcWorkRequestID string) (workrequests.GetWorkRequestResponse, error) { + workRequest := workrequests.GetWorkRequestRequest{ + WorkRequestId: common.String(opcWorkRequestID), + } + + resp, err := w.workClient.GetWorkRequest(context.TODO(), workRequest) + if err != nil { + 
return resp, err + } + + return resp, nil +} + +func (w *workRequestService) List(compartmentID string, resourceID string) (workrequests.ListWorkRequestsResponse, error) { + req := workrequests.ListWorkRequestsRequest{ + CompartmentId: common.String(compartmentID), + ResourceId: common.String(resourceID), + } + + resp, err := w.workClient.ListWorkRequests(context.TODO(), req) + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/commons/sharding/catalog.go b/commons/sharding/catalog.go index a24abeda..646c89b8 100644 --- a/commons/sharding/catalog.go +++ b/commons/sharding/catalog.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -43,9 +43,8 @@ import ( "reflect" "strconv" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/go-logr/logr" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -54,7 +53,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func buildLabelsForCatalog(instance *databasev1alpha1.ShardingDatabase, label string) map[string]string { +func buildLabelsForCatalog(instance *databasev4.ShardingDatabase, label string, catalogName string) map[string]string { return map[string]string{ "app": "OracleSharding", "type": "Catalog", @@ -62,7 +61,7 @@ func buildLabelsForCatalog(instance *databasev1alpha1.ShardingDatabase, label st } } -func getLabelForCatalog(instance *databasev1alpha1.ShardingDatabase) string { +func getLabelForCatalog(instance *databasev4.ShardingDatabase) string { // if len(OraCatalogSpex.Label) !=0 { // return OraCatalogSpex.Label @@ -71,7 +70,7 @@ func getLabelForCatalog(instance *databasev1alpha1.ShardingDatabase) string { return instance.Name } -func BuildStatefulSetForCatalog(instance 
*databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) *appsv1.StatefulSet { +func BuildStatefulSetForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) *appsv1.StatefulSet { sfset := &appsv1.StatefulSet{ TypeMeta: buildTypeMetaForCatalog(), ObjectMeta: builObjectMetaForCatalog(instance, OraCatalogSpex), @@ -92,29 +91,29 @@ func buildTypeMetaForCatalog() metav1.TypeMeta { } // Function to build ObjectMeta -func builObjectMetaForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) metav1.ObjectMeta { +func builObjectMetaForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) metav1.ObjectMeta { // building objectMeta objmeta := metav1.ObjectMeta{ Name: OraCatalogSpex.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), } return objmeta } // Function to build Stateful Specs -func buildStatefulSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) *appsv1.StatefulSetSpec { +func buildStatefulSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) *appsv1.StatefulSetSpec { // building Stateful set Specs sfsetspec := &appsv1.StatefulSetSpec{ ServiceName: OraCatalogSpex.Name, Selector: &metav1.LabelSelector{ - MatchLabels: buildLabelsForCatalog(instance, "sharding"), + MatchLabels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), }, Spec: *buildPodSpecForCatalog(instance, OraCatalogSpex), }, @@ -132,7 +131,7 @@ func 
buildStatefulSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, Or // Function to build PodSpec -func buildPodSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) *corev1.PodSpec { +func buildPodSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) *corev1.PodSpec { user := oraRunAsUser group := oraFsGroup @@ -141,10 +140,14 @@ func buildPodSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCata RunAsUser: &user, FSGroup: &group, }, - InitContainers: buildInitContainerSpecForCatalog(instance, OraCatalogSpex), - Containers: buildContainerSpecForCatalog(instance, OraCatalogSpex), - Volumes: buildVolumeSpecForCatalog(instance, OraCatalogSpex), + Containers: buildContainerSpecForCatalog(instance, OraCatalogSpex), + Volumes: buildVolumeSpecForCatalog(instance, OraCatalogSpex), + } + + if (instance.Spec.IsDownloadScripts) && (instance.Spec.ScriptsLocation != "") { + spec.InitContainers = buildInitContainerSpecForCatalog(instance, OraCatalogSpex) } + if len(instance.Spec.DbImagePullSecret) > 0 { spec.ImagePullSecrets = []corev1.LocalObjectReference{ { @@ -163,23 +166,17 @@ func buildPodSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCata } // Function to build Volume Spec -func buildVolumeSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.Volume { +func buildVolumeSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.Volume { var result []corev1.Volume result = []corev1.Volume{ { Name: OraCatalogSpex.Name + "secretmap-vol3", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: instance.Spec.Secret, + SecretName: instance.Spec.DbSecret.Name, }, }, }, - { - Name: OraCatalogSpex.Name + "orascript-vol5", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, { Name: 
OraCatalogSpex.Name + "oradshm-vol6", VolumeSource: corev1.VolumeSource{ @@ -196,11 +193,21 @@ func buildVolumeSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraC result = append(result, corev1.Volume{Name: OraCatalogSpex.Name + "orastage-vol7", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: instance.Spec.StagePvcName}}}) } + if instance.Spec.IsDownloadScripts { + result = append(result, corev1.Volume{Name: OraCatalogSpex.Name + "orascript-vol5", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}) + } + + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.Volume{Name: OraCatalogSpex.Name + "shared-storage-vol8", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: instance.Spec.TdeWalletPvc}}}) + } + } + return result } // Function to build the container Specification -func buildContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.Container { +func buildContainerSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.Container { // building Continer spec var result []corev1.Container containerSpec := corev1.Container{ @@ -208,7 +215,7 @@ func buildContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, O Image: instance.Spec.DbImage, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_RAW"}, + Add: []corev1.Capability{corev1.Capability("NET_ADMIN"), corev1.Capability("SYS_NICE")}, }, }, Resources: corev1.ResourceRequirements{ @@ -217,29 +224,49 @@ func buildContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, O VolumeMounts: buildVolumeMountSpecForCatalog(instance, OraCatalogSpex), LivenessProbe: 
&corev1.Probe{ // TODO: Investigate if it's ok to call status every 10 seconds - FailureThreshold: int32(30), - PeriodSeconds: int32(240), - InitialDelaySeconds: int32(300), - TimeoutSeconds: int32(60), - Handler: corev1.Handler{ + FailureThreshold: int32(3), + InitialDelaySeconds: int32(30), + PeriodSeconds: func() int32 { + if instance.Spec.LivenessCheckPeriod > 0 { + return int32(instance.Spec.LivenessCheckPeriod) + } + return 60 + }(), + TimeoutSeconds: int32(30), + ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ - Command: getLivenessCmd("CATALOG"), + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, }, }, }, /** - // Disabling this because the pod is not reachable till the time startup probe completes and without network pod configuration cannot be completed. - StartupProbe: &corev1.Probe{ - // Initial delay should be big, because shard setup takes time - FailureThreshold: int32(30), - PeriodSeconds: int32(120), - Handler: corev1.Handler{ + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ - Command: getLivenessCmd("CATALOG"), + //Command: getReadinessCmd("CATALOG"), + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, }, }, + InitialDelaySeconds: 20, + TimeoutSeconds: 20, + PeriodSeconds: func() int32 { + if instance.Spec.ReadinessCheckPeriod > 0 { + return int32(instance.Spec.ReadinessCheckPeriod) + } + return 60 + }(), }, **/ + StartupProbe: &corev1.Probe{ + FailureThreshold: int32(120), + PeriodSeconds: int32(40), + InitialDelaySeconds: int32(30), + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, 
+ }, + }, + }, Env: buildEnvVarsSpec(instance, OraCatalogSpex.EnvVars, OraCatalogSpex.Name, "CATALOG", false, ""), } if instance.Spec.IsClone { @@ -256,8 +283,8 @@ func buildContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, O return result } -//Function to build the init Container Spec -func buildInitContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.Container { +// Function to build the init Container Spec +func buildInitContainerSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.Container { var result []corev1.Container // building the init Container Spec privFlag := true @@ -293,21 +320,32 @@ func buildInitContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabas return result } -func buildVolumeMountSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.VolumeMount { +func buildVolumeMountSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.VolumeMount { var result []corev1.VolumeMount result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "secretmap-vol3", MountPath: oraSecretMount, ReadOnly: true}) result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "-oradata-vol4", MountPath: oraDataMount}) - result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "orascript-vol5", MountPath: oraScriptMount}) + if instance.Spec.IsDownloadScripts { + result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "orascript-vol5", MountPath: oraDbScriptMount}) + } result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "oradshm-vol6", MountPath: oraShm}) if len(instance.Spec.StagePvcName) != 0 { result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "orastage-vol7", MountPath: oraStage}) } + if checkTdeWalletFlag(instance) { + if 
len(instance.Spec.FssStorageClass) > 0 && len(instance.Spec.TdeWalletPvc) == 0 { + result = append(result, corev1.VolumeMount{Name: instance.Name + "shared-storage", MountPath: getTdeWalletMountLoc(instance)}) + } else { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "shared-storage-vol8", MountPath: getTdeWalletMountLoc(instance)}) + } + } + } return result } -func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.PersistentVolumeClaim { +func volumeClaimTemplatesForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.PersistentVolumeClaim { var claims []corev1.PersistentVolumeClaim @@ -319,16 +357,16 @@ func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, { ObjectMeta: metav1.ObjectMeta{ Name: OraCatalogSpex.Name + "-oradata-vol4", - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteOnce, }, StorageClassName: &instance.Spec.StorageClass, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse(strconv.FormatInt(int64(OraCatalogSpex.StorageSizeInGb), 10) + "Gi"), }, @@ -348,10 +386,38 @@ func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, claims[0].Spec.Selector = &metav1.LabelSelector{MatchLabels: OraCatalogSpex.PvMatchLabels} } + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) > 0 && len(instance.Spec.TdeWalletPvc) == 0 { + { + pvcClaim := 
corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name + "shared-storage", + Namespace: instance.Namespace, + OwnerReferences: getOwnerRef(instance), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + StorageClassName: &instance.Spec.FssStorageClass, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(strconv.FormatInt(int64(OraCatalogSpex.StorageSizeInGb), 10) + "Gi"), + }, + }, + }, + } + + claims = append(claims, pvcClaim) + } + } + } + return claims } -func BuildServiceDefForCatalog(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev1alpha1.CatalogSpec, svctype string) *corev1.Service { +func BuildServiceDefForCatalog(instance *databasev4.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev4.CatalogSpec, svctype string) *corev1.Service { //service := &corev1.Service{} service := &corev1.Service{ ObjectMeta: buildSvcObjectMetaForCatalog(instance, replicaCount, OraCatalogSpex, svctype), @@ -366,7 +432,7 @@ func BuildServiceDefForCatalog(instance *databasev1alpha1.ShardingDatabase, repl if svctype == "local" { service.Spec.ClusterIP = corev1.ClusterIPNone - service.Spec.Selector = buildLabelsForCatalog(instance, "sharding") + service.Spec.Selector = getSvcLabelsForCatalog(replicaCount, OraCatalogSpex) } // build Service Ports Specs to be exposed. If the PortMappings is not set then default ports will be exposed. 
@@ -375,7 +441,7 @@ func BuildServiceDefForCatalog(instance *databasev1alpha1.ShardingDatabase, repl } // Function to build Service ObjectMeta -func buildSvcObjectMetaForCatalog(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev1alpha1.CatalogSpec, svctype string) metav1.ObjectMeta { +func buildSvcObjectMetaForCatalog(instance *databasev4.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev4.CatalogSpec, svctype string) metav1.ObjectMeta { // building objectMeta var svcName string if svctype == "local" { @@ -388,14 +454,14 @@ func buildSvcObjectMetaForCatalog(instance *databasev1alpha1.ShardingDatabase, r objmeta := metav1.ObjectMeta{ Name: svcName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), } return objmeta } -func getSvcLabelsForCatalog(replicaCount int32, OraCatalogSpex databasev1alpha1.CatalogSpec) map[string]string { +func getSvcLabelsForCatalog(replicaCount int32, OraCatalogSpex databasev4.CatalogSpec) map[string]string { var labelStr map[string]string = make(map[string]string) if replicaCount == -1 { @@ -409,8 +475,8 @@ func getSvcLabelsForCatalog(replicaCount int32, OraCatalogSpex databasev1alpha1. 
} // ======================== update Section ======================== -func UpdateProvForCatalog(instance *databasev1alpha1.ShardingDatabase, - OraCatalogSpex databasev1alpha1.CatalogSpec, kClient client.Client, sfSet *appsv1.StatefulSet, catalogPod *corev1.Pod, logger logr.Logger, +func UpdateProvForCatalog(instance *databasev4.ShardingDatabase, + OraCatalogSpex databasev4.CatalogSpec, kClient client.Client, sfSet *appsv1.StatefulSet, catalogPod *corev1.Pod, logger logr.Logger, ) (ctrl.Result, error) { var isUpdate bool = false @@ -419,7 +485,7 @@ func UpdateProvForCatalog(instance *databasev1alpha1.ShardingDatabase, var msg string //msg = "Inside the updateProvForCatalog" - //reqLogger := r.Log.WithValues("Instance.Namespace", instance.Spec.Namespace, "Instance.Name", instance.Name) + //reqLogger := r.Log.WithValues("Instance.Namespace", instance.Namespace, "Instance.Name", instance.Name) LogMessages("DEBUG", msg, nil, instance, logger) // Memory Check @@ -430,7 +496,7 @@ func UpdateProvForCatalog(instance *databasev1alpha1.ShardingDatabase, oraSpexRes := OraCatalogSpex.Resources if !reflect.DeepEqual(shardContaineRes, oraSpexRes) { - isUpdate = true + isUpdate = false } } } diff --git a/commons/sharding/exec.go b/commons/sharding/exec.go index e368bf74..00caa995 100644 --- a/commons/sharding/exec.go +++ b/commons/sharding/exec.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,29 +40,59 @@ package commons import ( "bytes" - databasealphav1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + "fmt" "net/http" + "time" + + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/remotecommand" + "k8s.io/kubectl/pkg/cmd/cp" + "k8s.io/kubectl/pkg/cmd/util" ) // ExecCMDInContainer execute command in first container of a pod -func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasealphav1.ShardingDatabase, logger logr.Logger) (error, string, string) { +func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasev4.ShardingDatabase, logger logr.Logger) (string, string, error) { + var err1 error = nil var msg string var ( execOut bytes.Buffer execErr bytes.Buffer ) + for i := 0; i < 5; i++ { + if scheme.Scheme == nil { + time.Sleep(time.Second * 40) + } else { + break + } + } + + if kubeClient == nil { + msg = "ExecCommand() : kubeClient is nil" + err1 = fmt.Errorf("%s", msg) + return "Error:", "kubeClient is nil", err1 + } + if kubeConfig == nil { + msg = "ExecCommand() : kubeConfig is nil" + err1 = fmt.Errorf("%s", msg) + return "Error:", "kubeConfig is nil", err1 + } + + msg = "" req := kubeClient.CoreV1().RESTClient(). Post(). - Namespace(instance.Spec.Namespace). + Namespace(instance.Namespace). Resource("pods"). Name(podName). SubResource("exec"). 
@@ -75,7 +105,9 @@ func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, config, err := kubeConfig.ClientConfig() if err != nil { - return err, "Error Occurred", "Error Occurred" + msg = "Error after executing kubeConfig.ClientConfig" + LogMessages("Error", msg, err, instance, logger) + return "Error Occurred", "Error Occurred", err } // Connect to url (constructed from req) using SPDY (HTTP/2) protocol which allows bidirectional streams. @@ -83,7 +115,7 @@ func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, if err != nil { msg = "Error after executing remotecommand.NewSPDYExecutor" LogMessages("Error", msg, err, instance, logger) - return err, "Error Occurred", "Error Occurred" + return "Error Occurred", "Error Occurred", err } err = exec.Stream(remotecommand.StreamOptions{ @@ -100,8 +132,73 @@ func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, if len(execErr.String()) > 0 { LogMessages("INFO", execErr.String(), nil, instance, logger) } - return err, execOut.String(), execErr.String() + return execOut.String(), execErr.String(), err } - return nil, execOut.String(), execErr.String() + return execOut.String(), execErr.String(), nil +} + +func GetPodCopyConfig(kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasev4.ShardingDatabase, logger logr.Logger) (*rest.Config, *kubernetes.Clientset, error) { + + var clientSet *kubernetes.Clientset + config, err := kubeConfig.ClientConfig() + if err != nil { + return config, clientSet, err + } + clientSet, err = kubernetes.NewForConfig(config) + config.APIPath = "/api" + config.GroupVersion = &schema.GroupVersion{Version: "v1"} + config.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: scheme.Codecs} + + return config, clientSet, err + +} + +func KctlCopyFile(kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasev4.ShardingDatabase, restConfig 
*rest.Config, kclientset *kubernetes.Clientset, logger logr.Logger, src string, dst string, containername string) (*bytes.Buffer, *bytes.Buffer, *bytes.Buffer, error) { + + var in, out, errOut *bytes.Buffer + var ioStreams genericclioptions.IOStreams + for count := 0; ; count++ { + ioStreams, in, out, errOut = genericclioptions.NewTestIOStreams() + copyOptions := cp.NewCopyOptions(ioStreams) + copyOptions.ClientConfig = restConfig + if len(containername) != 0 { + copyOptions.Container = containername + } + configFlags := genericclioptions.NewConfigFlags(false) + f := util.NewFactory(configFlags) + cmd := cp.NewCmdCp(f, ioStreams) + err := copyOptions.Complete(f, cmd, []string{src, dst}) + if err != nil { + return nil, nil, nil, err + } + + c := rest.CopyConfig(restConfig) + cs, err := kubernetes.NewForConfig(c) + if err != nil { + return nil, nil, nil, err + } + + copyOptions.ClientConfig = c + copyOptions.Clientset = cs + + err = copyOptions.Run() + if err != nil { + if !shouldRetry(count, err) { + return nil, nil, nil, fmt.Errorf("could not run copy operation: %v. Stdout: %v, Stderr: %v", err, out.String(), errOut.String()) + } + time.Sleep(10 * time.Second) + continue + } + break + } + return in, out, errOut, nil + +} + +func shouldRetry(count int, err error) bool { + if count < connectFailureMaxTries { + return err.Error() == errorDialingBackendEOF + } + return false } diff --git a/commons/sharding/gsm.go b/commons/sharding/gsm.go index 720c00df..e6be8770 100644 --- a/commons/sharding/gsm.go +++ b/commons/sharding/gsm.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -41,10 +41,11 @@ package commons import ( "context" "fmt" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" "reflect" "strconv" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -55,7 +56,7 @@ import ( ) // Constants for hello-stateful StatefulSet & Volumes -func buildLabelsForGsm(instance *databasev1alpha1.ShardingDatabase, label string) map[string]string { +func buildLabelsForGsm(instance *databasev4.ShardingDatabase, label string, gsmName string) map[string]string { return map[string]string{ "app": "OracleGsming", "shard_name": "Gsm", @@ -63,7 +64,7 @@ func buildLabelsForGsm(instance *databasev1alpha1.ShardingDatabase, label string } } -func getLabelForGsm(instance *databasev1alpha1.ShardingDatabase) string { +func getLabelForGsm(instance *databasev4.ShardingDatabase) string { // if len(OraGsmSpex.Label) !=0 { // return OraGsmSpex.Label @@ -72,7 +73,7 @@ func getLabelForGsm(instance *databasev1alpha1.ShardingDatabase) string { return instance.Name } -func BuildStatefulSetForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) *appsv1.StatefulSet { +func BuildStatefulSetForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) *appsv1.StatefulSet { sfset := &appsv1.StatefulSet{ TypeMeta: buildTypeMetaForGsm(), ObjectMeta: builObjectMetaForGsm(instance, OraGsmSpex), @@ -92,34 +93,35 @@ func buildTypeMetaForGsm() metav1.TypeMeta { } // Function to build ObjectMeta -func builObjectMetaForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) metav1.ObjectMeta { +func builObjectMetaForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) metav1.ObjectMeta { // building objectMeta objmeta := metav1.ObjectMeta{ Name: OraGsmSpex.Name, - Namespace: 
instance.Spec.Namespace, - Labels: buildLabelsForGsm(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), OwnerReferences: getOwnerRef(instance), } return objmeta } // Function to build Stateful Specs -func buildStatefulSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) *appsv1.StatefulSetSpec { +func buildStatefulSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) *appsv1.StatefulSetSpec { // building Stateful set Specs sfsetspec := &appsv1.StatefulSetSpec{ ServiceName: OraGsmSpex.Name, Selector: &metav1.LabelSelector{ - MatchLabels: buildLabelsForGsm(instance, "sharding"), + MatchLabels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: buildLabelsForGsm(instance, "sharding"), + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), }, Spec: *buildPodSpecForGsm(instance, OraGsmSpex), }, VolumeClaimTemplates: volumeClaimTemplatesForGsm(instance, OraGsmSpex), } + /** if OraGsmSpex.Replicas == 0 { OraGsmSpex.Replicas = 1 sfsetspec.Replicas = &OraGsmSpex.Replicas @@ -127,13 +129,14 @@ func buildStatefulSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsm OraGsmSpex.Replicas = 1 sfsetspec.Replicas = &OraGsmSpex.Replicas } + **/ return sfsetspec } // Function to build PodSpec -func buildPodSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) *corev1.PodSpec { +func buildPodSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) *corev1.PodSpec { user := oraRunAsUser group := oraFsGroup @@ -142,10 +145,14 @@ func buildPodSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex RunAsUser: &user, FSGroup: &group, }, - InitContainers: buildInitContainerSpecForGsm(instance, OraGsmSpex), - Containers: buildContainerSpecForGsm(instance, OraGsmSpex), - 
Volumes: buildVolumeSpecForGsm(instance, OraGsmSpex), + Containers: buildContainerSpecForGsm(instance, OraGsmSpex), + Volumes: buildVolumeSpecForGsm(instance, OraGsmSpex), + } + + if (instance.Spec.IsDownloadScripts) && (instance.Spec.ScriptsLocation != "") { + spec.InitContainers = buildInitContainerSpecForGsm(instance, OraGsmSpex) } + if len(instance.Spec.GsmImagePullSecret) > 0 { spec.ImagePullSecrets = []corev1.LocalObjectReference{ { @@ -163,23 +170,17 @@ func buildPodSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex } // Function to build Volume Spec -func buildVolumeSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.Volume { +func buildVolumeSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.Volume { var result []corev1.Volume result = []corev1.Volume{ { Name: OraGsmSpex.Name + "secretmap-vol3", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: instance.Spec.Secret, + SecretName: instance.Spec.DbSecret.Name, }, }, }, - { - Name: OraGsmSpex.Name + "orascript-vol5", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, { Name: OraGsmSpex.Name + "oradshm-vol6", VolumeSource: corev1.VolumeSource{ @@ -196,11 +197,14 @@ func buildVolumeSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSp result = append(result, corev1.Volume{Name: OraGsmSpex.Name + "orastage-vol7", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: instance.Spec.StagePvcName}}}) } + if instance.Spec.IsDownloadScripts { + result = append(result, corev1.Volume{Name: OraGsmSpex.Name + "orascript-vol5", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}) + } return result } // Function to build the container Specification -func buildContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex 
databasev1alpha1.GsmSpec) []corev1.Container { +func buildContainerSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.Container { // building Continer spec var result []corev1.Container var masterGsmFlag = false @@ -229,11 +233,16 @@ func buildContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGs VolumeMounts: buildVolumeMountSpecForGsm(instance, OraGsmSpex), LivenessProbe: &corev1.Probe{ // TODO: Investigate if it's ok to call status every 10 seconds - FailureThreshold: int32(30), - PeriodSeconds: int32(240), - InitialDelaySeconds: int32(300), - TimeoutSeconds: int32(60), - Handler: corev1.Handler{ + FailureThreshold: int32(3), + InitialDelaySeconds: int32(30), + PeriodSeconds: func() int32 { + if instance.Spec.LivenessCheckPeriod > 0 { + return int32(instance.Spec.LivenessCheckPeriod) + } + return 60 + }(), + TimeoutSeconds: int32(20), + ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ Command: getLivenessCmd("GSM"), }, @@ -262,8 +271,8 @@ func buildContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGs return result } -//Function to build the init Container Spec -func buildInitContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.Container { +// Function to build the init Container Spec +func buildInitContainerSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.Container { var result []corev1.Container // building the init Container Spec privFlag := true @@ -300,11 +309,13 @@ func buildInitContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, O return result } -func buildVolumeMountSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.VolumeMount { +func buildVolumeMountSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.VolumeMount { var result []corev1.VolumeMount result = append(result, 
corev1.VolumeMount{Name: OraGsmSpex.Name + "secretmap-vol3", MountPath: oraSecretMount, ReadOnly: true}) result = append(result, corev1.VolumeMount{Name: OraGsmSpex.Name + "-oradata-vol4", MountPath: oraGsmDataMount}) - result = append(result, corev1.VolumeMount{Name: OraGsmSpex.Name + "orascript-vol5", MountPath: oraScriptMount}) + if instance.Spec.IsDownloadScripts { + result = append(result, corev1.VolumeMount{Name: OraGsmSpex.Name + "orascript-vol5", MountPath: oraScriptMount}) + } result = append(result, corev1.VolumeMount{Name: OraGsmSpex.Name + "oradshm-vol6", MountPath: oraShm}) if len(instance.Spec.StagePvcName) != 0 { @@ -314,7 +325,7 @@ func buildVolumeMountSpecForGsm(instance *databasev1alpha1.ShardingDatabase, Ora return result } -func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.PersistentVolumeClaim { +func volumeClaimTemplatesForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.PersistentVolumeClaim { var claims []corev1.PersistentVolumeClaim @@ -326,8 +337,8 @@ func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, Ora { ObjectMeta: metav1.ObjectMeta{ Name: OraGsmSpex.Name + "-oradata-vol4", - Namespace: instance.Spec.Namespace, - Labels: buildLabelsForGsm(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), OwnerReferences: getOwnerRef(instance), }, Spec: corev1.PersistentVolumeClaimSpec{ @@ -335,7 +346,7 @@ func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, Ora corev1.ReadWriteOnce, }, StorageClassName: &instance.Spec.StorageClass, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse(strconv.FormatInt(int64(OraGsmSpex.StorageSizeInGb), 10) + "Gi"), }, @@ -343,6 +354,14 @@ func volumeClaimTemplatesForGsm(instance 
*databasev1alpha1.ShardingDatabase, Ora }, }, } + + if len(OraGsmSpex.PvAnnotations) > 0 { + claims[0].ObjectMeta.Annotations = make(map[string]string) + for key, value := range OraGsmSpex.PvAnnotations { + claims[0].ObjectMeta.Annotations[key] = value + } + } + if len(OraGsmSpex.PvMatchLabels) > 0 { claims[0].Spec.Selector = &metav1.LabelSelector{MatchLabels: OraGsmSpex.PvMatchLabels} } @@ -350,7 +369,7 @@ func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, Ora return claims } -func BuildServiceDefForGsm(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec, svctype string) *corev1.Service { +func BuildServiceDefForGsm(instance *databasev4.ShardingDatabase, replicaCount int32, OraGsmSpex databasev4.GsmSpec, svctype string) *corev1.Service { //service := &corev1.Service{} service := &corev1.Service{ ObjectMeta: buildSvcObjectMetaForGsm(instance, replicaCount, OraGsmSpex, svctype), @@ -365,7 +384,7 @@ func BuildServiceDefForGsm(instance *databasev1alpha1.ShardingDatabase, replicaC if svctype == "local" { service.Spec.ClusterIP = corev1.ClusterIPNone - service.Spec.Selector = buildLabelsForGsm(instance, "sharding") + service.Spec.Selector = getSvcLabelsForGsm(replicaCount, OraGsmSpex) } // build Service Ports Specs to be exposed. If the PortMappings is not set then default ports will be exposed. 
@@ -374,7 +393,7 @@ func BuildServiceDefForGsm(instance *databasev1alpha1.ShardingDatabase, replicaC } // Function to build Service ObjectMeta -func buildSvcObjectMetaForGsm(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec, svctype string) metav1.ObjectMeta { +func buildSvcObjectMetaForGsm(instance *databasev4.ShardingDatabase, replicaCount int32, OraGsmSpex databasev4.GsmSpec, svctype string) metav1.ObjectMeta { // building objectMeta var svcName string if svctype == "local" { @@ -387,14 +406,14 @@ func buildSvcObjectMetaForGsm(instance *databasev1alpha1.ShardingDatabase, repli objmeta := metav1.ObjectMeta{ Name: svcName, - Namespace: instance.Spec.Namespace, - Labels: buildLabelsForGsm(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), OwnerReferences: getOwnerRef(instance), } return objmeta } -func getSvcLabelsForGsm(replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec) map[string]string { +func getSvcLabelsForGsm(replicaCount int32, OraGsmSpex databasev4.GsmSpec) map[string]string { var labelStr map[string]string = make(map[string]string) if replicaCount == -1 { @@ -408,8 +427,8 @@ func getSvcLabelsForGsm(replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec) } // This function cleanup the shard from GSM -func OraCleanupForGsm(instance *databasev1alpha1.ShardingDatabase, - OraGsmSpex databasev1alpha1.GsmSpec, +func OraCleanupForGsm(instance *databasev4.ShardingDatabase, + OraGsmSpex databasev4.GsmSpec, oldReplicaSize int32, newReplicaSize int32, ) string { @@ -424,13 +443,12 @@ func OraCleanupForGsm(instance *databasev1alpha1.ShardingDatabase, return err1 } -func UpdateProvForGsm(instance *databasev1alpha1.ShardingDatabase, - OraGsmSpex databasev1alpha1.GsmSpec, kClient client.Client, sfSet *appsv1.StatefulSet, gsmPod *corev1.Pod, logger logr.Logger, +func UpdateProvForGsm(instance *databasev4.ShardingDatabase, + OraGsmSpex 
databasev4.GsmSpec, kClient client.Client, sfSet *appsv1.StatefulSet, gsmPod *corev1.Pod, logger logr.Logger, ) (ctrl.Result, error) { var msg string - var size int32 - size = 1 + var size int32 = 1 var isUpdate bool = false var err error var i int @@ -443,7 +461,7 @@ func UpdateProvForGsm(instance *databasev1alpha1.ShardingDatabase, // Ensure deployment replicas match the desired state if sfSet.Spec.Replicas != nil { if *sfSet.Spec.Replicas != size { - msg = "Current StatefulSet replicas do not match configured Shard Replicas. Gsm is configured with only 1 but current replicas is set with " + strconv.FormatInt(int64(*sfSet.Spec.Replicas), 10) + msg = "Current StatefulSet replicas do not match configured GSM Replicas. Gsm is configured with only 1 but current replicas is set with " + strconv.FormatInt(int64(*sfSet.Spec.Replicas), 10) LogMessages("DEBUG", msg, nil, instance, logger) isUpdate = true } @@ -456,7 +474,7 @@ func UpdateProvForGsm(instance *databasev1alpha1.ShardingDatabase, oraSpexRes := OraGsmSpex.Resources if !reflect.DeepEqual(shardContaineRes, oraSpexRes) { - isUpdate = true + isUpdate = false } } } diff --git a/commons/sharding/provstatus.go b/commons/sharding/provstatus.go index ac7e8c02..44544c60 100644 --- a/commons/sharding/provstatus.go +++ b/commons/sharding/provstatus.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -42,7 +42,7 @@ import ( "fmt" "strconv" - databasealphav1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -52,7 +52,7 @@ import ( ) // CHeck if record exist in a struct -func CheckGsmStatusInst(instSpex []databasealphav1.GsmStatusDetails, name string, +func CheckGsmStatusInst(instSpex []databasev4.GsmStatusDetails, name string, ) (int, bool) { var status bool = false @@ -69,63 +69,63 @@ func CheckGsmStatusInst(instSpex []databasealphav1.GsmStatusDetails, name string return idx, status } -func UpdateGsmStatusData(instance *databasealphav1.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, +func UpdateGsmStatusData(instance *databasev4.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, ) { - if state == string(databasealphav1.AvailableState) { + if state == string(databasev4.AvailableState) { // Evaluate following values only if state is set to available svcName := instance.Spec.Gsm[Specidx].Name + "-0." + instance.Spec.Gsm[Specidx].Name k8sExternalSvcName := svcName + strconv.FormatInt(int64(0), 10) + "-svc." + getInstanceNs(instance) + ".svc.cluster.local" K8sInternalSvcName := svcName + "." 
+ getInstanceNs(instance) + ".svc.cluster.local" _, K8sInternalSvcIP, _ := GetSvcIp(instance.Spec.Gsm[Specidx].Name+"-0", K8sInternalSvcName, instance, kubeClient, kubeConfig, logger) _, K8sExternalSvcIP, _ := GetSvcIp(instance.Spec.Gsm[Specidx].Name+"-0", k8sExternalSvcName, instance, kubeClient, kubeConfig, logger) - DbPasswordSecret := instance.Spec.Secret + DbPasswordSecret := instance.Spec.DbSecret.Name instance.Status.Gsm.Services = GetGsmServices(instance.Spec.Gsm[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) // externIp := strings.Replace(K8sInternalSvcIP, "/r/n", "", -1) // internIp := strings.Replace(K8sExternalSvcIP, "/r/n", "", -1) // Populate the Maps - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Name), instance.Spec.Gsm[Specidx].Name) - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.DbPasswordSecret), DbPasswordSecret) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Name), instance.Spec.Gsm[Specidx].Name) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.DbPasswordSecret), DbPasswordSecret) if instance.Spec.IsExternalSvc == true { - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvc), k8sExternalSvcName) - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvcIP), K8sExternalSvcIP) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvc), k8sExternalSvcName) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvcIP), K8sExternalSvcIP) } - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvc), K8sInternalSvcName) - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvcIP), K8sInternalSvcIP) - 
insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.State), state) - } else if state == string(databasealphav1.Terminated) { - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Name)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvc), K8sInternalSvcName) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvcIP), K8sInternalSvcIP) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.State), state) + } else if state == string(databasev4.Terminated) { + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Name)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Role)) instance.Status.Gsm.Services = "" } else { - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Name), instance.Spec.Gsm[Specidx].Name) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeGsmKeys(instance, 
instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Name), instance.Spec.Gsm[Specidx].Name) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Role)) instance.Status.Gsm.Services = "" } } -func UpdateCatalogStatusData(instance *databasealphav1.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, +func UpdateCatalogStatusData(instance *databasev4.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, ) { mode := GetDbOpenMode(instance.Spec.Catalog[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) - if state == string(databasealphav1.AvailableState) { + if state == string(databasev4.AvailableState) { // Evaluate following values only if state is set to available svcName := instance.Spec.Catalog[Specidx].Name + "-0." + instance.Spec.Catalog[Specidx].Name k8sExternalSvcName := svcName + strconv.FormatInt(int64(0), 10) + "-svc." + getInstanceNs(instance) + ".svc.cluster.local" K8sInternalSvcName := svcName + "." 
+ getInstanceNs(instance) + ".svc.cluster.local" _, K8sInternalSvcIP, _ := GetSvcIp(instance.Spec.Catalog[Specidx].Name+"-0", K8sInternalSvcName, instance, kubeClient, kubeConfig, logger) _, K8sExternalSvcIP, _ := GetSvcIp(instance.Spec.Catalog[Specidx].Name+"-0", k8sExternalSvcName, instance, kubeClient, kubeConfig, logger) - DbPasswordSecret := instance.Spec.Secret + DbPasswordSecret := instance.Spec.DbSecret.Name oracleSid := GetSidName(instance.Spec.Catalog[Specidx].EnvVars, instance.Spec.Catalog[Specidx].Name) oraclePdb := GetPdbName(instance.Spec.Catalog[Specidx].EnvVars, instance.Spec.Catalog[Specidx].Name) role := GetDbRole(instance.Spec.Catalog[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) @@ -133,107 +133,107 @@ func UpdateCatalogStatusData(instance *databasealphav1.ShardingDatabase, Specidx // internIp := strings.Replace(K8sExternalSvcIP, "/r/n", "", -1) // Populate the Maps - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Name), instance.Spec.Catalog[Specidx].Name) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.DbPasswordSecret), DbPasswordSecret) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Name), instance.Spec.Catalog[Specidx].Name) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.DbPasswordSecret), DbPasswordSecret) if instance.Spec.IsExternalSvc == true { - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvc), k8sExternalSvcName) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvcIP), K8sExternalSvcIP) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvc), k8sExternalSvcName) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, 
string(databasev4.K8sExternalSvcIP), K8sExternalSvcIP) } - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvc), K8sInternalSvcName) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvcIP), K8sInternalSvcIP) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OracleSid), oracleSid) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OraclePdb), oraclePdb) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role), role) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OpenMode), mode) - } else if state == string(databasealphav1.Terminated) { - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.State)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Name)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OraclePdb)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OracleSid)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) - 
removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OpenMode)) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvc), K8sInternalSvcName) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvcIP), K8sInternalSvcIP) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.State), state) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OracleSid), oracleSid) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OraclePdb), oraclePdb) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role), role) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OpenMode), mode) + } else if state == string(databasev4.Terminated) { + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.State)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Name)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OraclePdb)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OracleSid)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) + removeCatalogKeys(instance, 
instance.Spec.Catalog[Specidx].Name, string(databasev4.OpenMode)) } else { - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Name), instance.Spec.Catalog[Specidx].Name) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OpenMode), mode) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OraclePdb)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OracleSid)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.State), state) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Name), instance.Spec.Catalog[Specidx].Name) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OpenMode), mode) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeCatalogKeys(instance, 
instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OraclePdb)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OracleSid)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) } } -func UpdateShardStatusData(instance *databasealphav1.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, +func UpdateShardStatusData(instance *databasev4.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, ) { mode := GetDbOpenMode(instance.Spec.Shard[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) - if state == string(databasealphav1.AvailableState) { + if state == string(databasev4.AvailableState) { // Evaluate following values only if state is set to available svcName := instance.Spec.Shard[Specidx].Name + "-0." + instance.Spec.Shard[Specidx].Name k8sExternalSvcName := svcName + strconv.FormatInt(int64(0), 10) + "-svc." + getInstanceNs(instance) + ".svc.cluster.local" K8sInternalSvcName := svcName + "." 
+ getInstanceNs(instance) + ".svc.cluster.local" _, K8sInternalSvcIP, _ := GetSvcIp(instance.Spec.Shard[Specidx].Name+"-0", K8sInternalSvcName, instance, kubeClient, kubeConfig, logger) _, K8sExternalSvcIP, _ := GetSvcIp(instance.Spec.Shard[Specidx].Name+"-0", k8sExternalSvcName, instance, kubeClient, kubeConfig, logger) - DbPasswordSecret := instance.Spec.Secret + DbPasswordSecret := instance.Spec.DbSecret.Name oracleSid := GetSidName(instance.Spec.Shard[Specidx].EnvVars, instance.Spec.Shard[Specidx].Name) oraclePdb := GetPdbName(instance.Spec.Shard[Specidx].EnvVars, instance.Spec.Shard[Specidx].Name) role := GetDbRole(instance.Spec.Shard[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) // Populate the Maps - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Name), instance.Spec.Shard[Specidx].Name) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.DbPasswordSecret), DbPasswordSecret) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Name), instance.Spec.Shard[Specidx].Name) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.DbPasswordSecret), DbPasswordSecret) if instance.Spec.IsExternalSvc == true { - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvc), k8sExternalSvcName) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvcIP), K8sExternalSvcIP) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvc), k8sExternalSvcName) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvcIP), K8sExternalSvcIP) } - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvc), K8sInternalSvcName) - insertOrUpdateShardKeys(instance, 
instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvcIP), K8sInternalSvcIP) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OracleSid), oracleSid) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OraclePdb), oraclePdb) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role), role) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OpenMode), mode) - } else if state == string(databasealphav1.Terminated) { - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.State)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Name)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OraclePdb)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OracleSid)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OpenMode)) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvc), K8sInternalSvcName) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, 
string(databasev4.K8sInternalSvcIP), K8sInternalSvcIP) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.State), state) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OracleSid), oracleSid) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OraclePdb), oraclePdb) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role), role) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OpenMode), mode) + } else if state == string(databasev4.Terminated) { + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.State)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Name)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OraclePdb)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OracleSid)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OpenMode)) } else { - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Name), instance.Spec.Shard[Specidx].Name) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, 
string(databasealphav1.OpenMode), mode) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OraclePdb)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OracleSid)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.State), state) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Name), instance.Spec.Shard[Specidx].Name) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OpenMode), mode) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OraclePdb)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OracleSid)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) } } -func insertOrUpdateShardKeys(instance 
*databasealphav1.ShardingDatabase, name string, key string, value string) { +func insertOrUpdateShardKeys(instance *databasev4.ShardingDatabase, name string, key string, value string) { newKey := name + "_" + key if len(instance.Status.Shard) > 0 { if _, ok := instance.Status.Shard[newKey]; ok { @@ -248,7 +248,7 @@ func insertOrUpdateShardKeys(instance *databasealphav1.ShardingDatabase, name st } -func removeShardKeys(instance *databasealphav1.ShardingDatabase, name string, key string) { +func removeShardKeys(instance *databasev4.ShardingDatabase, name string, key string) { newKey := name + "_" + key if len(instance.Status.Shard) > 0 { if _, ok := instance.Status.Shard[newKey]; ok { @@ -258,7 +258,7 @@ func removeShardKeys(instance *databasealphav1.ShardingDatabase, name string, ke } } -func insertOrUpdateCatalogKeys(instance *databasealphav1.ShardingDatabase, name string, key string, value string) { +func insertOrUpdateCatalogKeys(instance *databasev4.ShardingDatabase, name string, key string, value string) { newKey := name + "_" + key if len(instance.Status.Catalog) > 0 { if _, ok := instance.Status.Catalog[newKey]; ok { @@ -273,7 +273,7 @@ func insertOrUpdateCatalogKeys(instance *databasealphav1.ShardingDatabase, name } -func removeCatalogKeys(instance *databasealphav1.ShardingDatabase, name string, key string) { +func removeCatalogKeys(instance *databasev4.ShardingDatabase, name string, key string) { newKey := name + "_" + key if len(instance.Status.Catalog) > 0 { if _, ok := instance.Status.Catalog[newKey]; ok { @@ -283,7 +283,7 @@ func removeCatalogKeys(instance *databasealphav1.ShardingDatabase, name string, } } -func insertOrUpdateGsmKeys(instance *databasealphav1.ShardingDatabase, name string, key string, value string) { +func insertOrUpdateGsmKeys(instance *databasev4.ShardingDatabase, name string, key string, value string) { newKey := name + "_" + key if len(instance.Status.Gsm.Details) > 0 { if _, ok := instance.Status.Gsm.Details[newKey]; ok { @@ 
-298,7 +298,7 @@ func insertOrUpdateGsmKeys(instance *databasealphav1.ShardingDatabase, name stri } -func removeGsmKeys(instance *databasealphav1.ShardingDatabase, name string, key string) { +func removeGsmKeys(instance *databasev4.ShardingDatabase, name string, key string) { newKey := name + "_" + key if len(instance.Status.Gsm.Details) > 0 { if _, ok := instance.Status.Gsm.Details[newKey]; ok { @@ -308,18 +308,18 @@ func removeGsmKeys(instance *databasealphav1.ShardingDatabase, name string, key } } -func getInstanceNs(instance *databasealphav1.ShardingDatabase) string { +func getInstanceNs(instance *databasev4.ShardingDatabase) string { var namespace string - if instance.Spec.Namespace == "" { + if instance.Namespace == "" { namespace = "default" } else { - namespace = instance.Spec.Namespace + namespace = instance.Namespace } return namespace } // File the meta condition and return the meta view -func GetMetaCondition(instance *databasealphav1.ShardingDatabase, result *ctrl.Result, err *error, stateType string, stateMsg string) metav1.Condition { +func GetMetaCondition(instance *databasev4.ShardingDatabase, result *ctrl.Result, err *error, stateType string, stateMsg string) metav1.Condition { return metav1.Condition{ Type: stateType, @@ -332,15 +332,14 @@ func GetMetaCondition(instance *databasealphav1.ShardingDatabase, result *ctrl.R } // ======================= CHeck GSM Director Status ============== -func CheckGsmStatus(gname string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func CheckGsmStatus(gname string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { var err error - var msg string + var msg string = "Inside the checkGsmStatus. Checking GSM director in " + GetFmtStr(gname) + " pod." - msg = "Inside the checkGsmStatus. Checking GSM director in " + GetFmtStr(gname) + " pod." 
LogMessages("DEBUG", msg, nil, instance, logger) - err, _, _ = ExecCommand(gname, getGsmvalidateCmd(), kubeClient, kubeconfig, instance, logger) + _, _, err = ExecCommand(gname, getGsmvalidateCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { return err } @@ -348,19 +347,20 @@ func CheckGsmStatus(gname string, instance *databasealphav1.ShardingDatabase, ku return nil } -//============ Functiont o check the status of the Shard and catalog ========= +// ============ Functiont o check the status of the Shard and catalog ========= // ================================ Validate shard =========================== -func ValidateDbSetup(podName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func ValidateDbSetup(podName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(podName, shardValidationCmd(), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(podName, shardValidationCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { - return fmt.Errorf("Error ocurred while validating the DB Setup") + + return fmt.Errorf("error ocurred while validating the DB Setup") } return nil } -func UpdateGsmShardStatus(instance *databasealphav1.ShardingDatabase, name string, state string) { +func UpdateGsmShardStatus(instance *databasev4.ShardingDatabase, name string, state string) { //smap := make(map[string]string) if _, ok := instance.Status.Gsm.Shards[name]; ok { instance.Status.Gsm.Shards[name] = state @@ -384,7 +384,7 @@ func UpdateGsmShardStatus(instance *databasealphav1.ShardingDatabase, name strin } -func GetGsmShardStatus(instance *databasealphav1.ShardingDatabase, name string) string { +func GetGsmShardStatus(instance *databasev4.ShardingDatabase, name string) string { if _, ok := instance.Status.Gsm.Shards[name]; ok { return 
instance.Status.Gsm.Shards[name] @@ -393,7 +393,7 @@ func GetGsmShardStatus(instance *databasealphav1.ShardingDatabase, name string) } -func GetGsmShardStatusKey(instance *databasealphav1.ShardingDatabase, key string) string { +func GetGsmShardStatusKey(instance *databasev4.ShardingDatabase, key string) string { if _, ok := instance.Status.Shard[key]; ok { return instance.Status.Shard[key] @@ -402,7 +402,7 @@ func GetGsmShardStatusKey(instance *databasealphav1.ShardingDatabase, key string } -func GetGsmCatalogStatusKey(instance *databasealphav1.ShardingDatabase, key string) string { +func GetGsmCatalogStatusKey(instance *databasev4.ShardingDatabase, key string) string { if _, ok := instance.Status.Catalog[key]; ok { return instance.Status.Catalog[key] @@ -411,7 +411,7 @@ func GetGsmCatalogStatusKey(instance *databasealphav1.ShardingDatabase, key stri } -func GetGsmDetailsSttausKey(instance *databasealphav1.ShardingDatabase, key string) string { +func GetGsmDetailsSttausKey(instance *databasev4.ShardingDatabase, key string) string { if _, ok := instance.Status.Gsm.Details[key]; ok { return instance.Status.Gsm.Details[key] diff --git a/commons/sharding/scommon.go b/commons/sharding/scommon.go index 55fdd5dc..3b3f1b04 100644 --- a/commons/sharding/scommon.go +++ b/commons/sharding/scommon.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,16 +40,21 @@ package commons import ( "context" + "encoding/json" "fmt" - databasealphav1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + "slices" + + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "regexp" "strconv" "strings" + "os" + "github.com/go-logr/logr" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/ons" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/ons" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -76,6 +81,7 @@ const ( oraRunAsUser = int64(54321) oraFsGroup = int64(54321) oraScriptMount = "/opt/oracle/scripts/sharding/scripts" + oraDbScriptMount = "/opt/oracle/scripts/sharding" oraDataMount = "/opt/oracle/oradata" oraGsmDataMount = "/opt/oracle/gsmdata" oraConfigMapMount = "/mnt/config-map" @@ -90,41 +96,38 @@ const ( oraLocalOnsPort = 6123 oraAgentPort = 8080 ShardingDatabaseFinalizer = "Shardingdb.oracle.com" + TmpLoc = "/var/tmp" + connectFailureMaxTries = 5 + errorDialingBackendEOF = "error dialing backend: EOF" ) // Function to build the env var specification -func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []databasealphav1.EnvironmentVariable, name string, restype string, masterFlag bool, directorParams string) []corev1.EnvVar { +func buildEnvVarsSpec(instance *databasev4.ShardingDatabase, variables []databasev4.EnvironmentVariable, name string, restype string, masterFlag bool, directorParams string) []corev1.EnvVar { var result []corev1.EnvVar var varinfo string var sidFlag bool = false - var secretFlag bool = false - var pwdFileFLag bool = false - var pwdKeyFlag bool = false + //var sidValue string + var pdbValue string var pdbFlag bool = false var sDirectParam bool = false var sGroup1Params bool = false - var sGroup2Params bool = false + //var sGroup2Params bool = false var catalogParams 
bool = false var oldPdbFlag bool = false var oldSidFlag bool = false var archiveLogFlag bool = false var shardSetupFlag bool = false + var dbUnameFlag bool = false + var ofreePdbFlag bool = false for _, variable := range variables { if variable.Name == "ORACLE_SID" { sidFlag = true - } - if variable.Name == "SECRET_VOLUME" { - secretFlag = true - } - if variable.Name == "COMMON_OS_PWD_FILE" { - pwdFileFLag = true - } - if variable.Name == "PWD_KEY" { - pwdKeyFlag = true + //sidValue = variable.Value } if variable.Name == "ORACLE_PDB" { pdbFlag = true + pdbValue = variable.Value } if variable.Name == "SHARD_DIRECTOR_PARAMS" { sDirectParam = true @@ -132,9 +135,6 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da if variable.Name == "SHARD1_GROUP_PARAMS" { sGroup1Params = true } - if variable.Name == "SHARD2_GROUP_PARAMS" { - sGroup2Params = true - } if variable.Name == "CATALOG_PARAMS" { catalogParams = true } @@ -150,8 +150,32 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da if variable.Name == "OLD_ORACLE_PDB" { archiveLogFlag = true } + if variable.Name == "DB_UNIQUE_NAME" { + dbUnameFlag = true + } + if variable.Name == "ORACLE_FREE_PDB" { + ofreePdbFlag = true + } + result = append(result, corev1.EnvVar{Name: variable.Name, Value: variable.Value}) } + + if !dbUnameFlag { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + result = append(result, corev1.EnvVar{Name: "DB_UNIQUE_NAME", Value: strings.ToUpper(name)}) + } + } + + if !ofreePdbFlag { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + if pdbFlag { + result = append(result, corev1.EnvVar{Name: "ORACLE_FREE_PDB", Value: pdbValue}) + } else { + result = append(result, corev1.EnvVar{Name: "ORACLE_FREE_PDB", Value: strings.ToUpper(name) + "PDB"}) + } + } + } + if !shardSetupFlag { if restype == "SHARD" { result = append(result, corev1.EnvVar{Name: "SHARD_SETUP", Value: "true"}) @@ -171,46 +195,112 @@ func 
buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da result = append(result, corev1.EnvVar{Name: "ENABLE_ARCHIVELOG", Value: "true"}) } } - if !sidFlag { - if restype == "SHARD" { - result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) - } - if restype == "CATALOG" { - result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) + if strings.ToLower(instance.Spec.DbEdition) == "free" { + result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: "FREE"}) + } else { + if restype == "SHARD" { + result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) + } + if restype == "CATALOG" { + result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) + } } } if !pdbFlag { - if restype == "SHARD" { - result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) - } - if restype == "CATALOG" { - result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) + if strings.ToLower(instance.Spec.DbEdition) == "free" { + result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: "FREEPDB"}) + } else { + if restype == "SHARD" { + result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) + } + if restype == "CATALOG" { + result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) + } } } - if !secretFlag { - result = append(result, corev1.EnvVar{Name: "SECRET_VOLUME", Value: "/mnt/secrets"}) + // Secret Settings + + if strings.ToLower(instance.Spec.DbSecret.EncryptionType) != "base64" { + result = append(result, corev1.EnvVar{Name: "PWD_KEY", Value: instance.Spec.DbSecret.KeyFileName}) + result = append(result, corev1.EnvVar{Name: "COMMON_OS_PWD_FILE", Value: instance.Spec.DbSecret.PwdFileName}) + } else { + result = append(result, corev1.EnvVar{Name: "PASSWORD_FILE", Value: 
instance.Spec.DbSecret.PwdFileName}) } - if !pwdFileFLag { - result = append(result, corev1.EnvVar{Name: "COMMON_OS_PWD_FILE", Value: "common_os_pwdfile.enc"}) + if len(instance.Spec.DbSecret.PwdFileMountLocation) != 0 { + result = append(result, corev1.EnvVar{Name: "SECRET_VOLUME", Value: instance.Spec.DbSecret.PwdFileMountLocation}) + } else { + result = append(result, corev1.EnvVar{Name: "SECRET_VOLUME", Value: oraSecretMount}) } - if !pwdKeyFlag { - result = append(result, corev1.EnvVar{Name: "PWD_KEY", Value: "pwd.key"}) + if len(instance.Spec.DbSecret.KeyFileMountLocation) != 0 { + result = append(result, corev1.EnvVar{Name: "KEY_SECRET_VOLUME", Value: instance.Spec.DbSecret.KeyFileMountLocation}) + } else { + result = append(result, corev1.EnvVar{Name: "KEY_SECRET_VOLUME", Value: oraSecretMount}) } + if restype == "GSM" { if !sDirectParam { - // varinfo = "director_name=sharddirector" + sDirectorCounter + ";director_region=primary;director_port=1521" + //varinfo = "director_name=sharddirector" + sDirectorCounter + ";director_region=primary;director_port=1521" varinfo = directorParams result = append(result, corev1.EnvVar{Name: "SHARD_DIRECTOR_PARAMS", Value: varinfo}) } - if !sGroup1Params { - varinfo = "group_name=shardgroup1;deploy_as=primary;group_region=primary" - result = append(result, corev1.EnvVar{Name: "SHARD1_GROUP_PARAMS", Value: varinfo}) + if strings.ToUpper(instance.Spec.ShardingType) != "USER" { + if !sGroup1Params { + if len(instance.Spec.GsmShardGroup) > 0 { + for i := 0; i < len(instance.Spec.GsmShardGroup); i++ { + if strings.ToUpper(instance.Spec.GsmShardGroup[i].DeployAs) == "PRIMARY" { + group_name := instance.Spec.GsmShardGroup[i].Name + //deploy_as := instance.Spec.ShardGroup[i].DeployAs + region := instance.Spec.GsmShardGroup[i].Region + varinfo = "group_name=" + group_name + ";" + "deploy_as=primary;" + "group_region=" + region + result = append(result, corev1.EnvVar{Name: "SHARD1_GROUP_PARAMS", Value: varinfo}) + } + if 
strings.ToUpper(instance.Spec.GsmShardGroup[i].DeployAs) == "STANDBY" { + group_name := instance.Spec.GsmShardGroup[i].Name + //deploy_as := instance.Spec.ShardGroup[i].DeployAs + region := instance.Spec.GsmShardGroup[i].Region + varinfo = "group_name=" + group_name + ";" + "deploy_as=standby;" + "group_region=" + region + result = append(result, corev1.EnvVar{Name: "SHARD2_GROUP_PARAMS", Value: varinfo}) + } + } + } + } else { + varinfo = "group_name=shardgroup1;deploy_as=primary;group_region=primary" + result = append(result, corev1.EnvVar{Name: "SHARD1_GROUP_PARAMS", Value: varinfo}) + } + } + + if strings.ToUpper(instance.Spec.ShardingType) == "USER" { + result = append(result, corev1.EnvVar{Name: "SHARDING_TYPE", Value: "USER"}) + } + // SERVICE Params setting + var svc string + if len(instance.Spec.GsmService) > 0 { + svc = "" + for i := 0; i < len(instance.Spec.GsmService); i++ { + svc = svc + "service_name=" + instance.Spec.GsmService[i].Name + ";" + if len(instance.Spec.GsmService[i].Role) != 0 { + svc = svc + "service_role=" + instance.Spec.GsmService[i].Role + } else { + svc = svc + "service_role=primary" + } + result = append(result, corev1.EnvVar{Name: "SERVICE" + fmt.Sprint(i) + "_PARAMS", Value: svc}) + svc = "" + } } - if instance.Spec.IsDataGuard { - if !sGroup2Params { - varinfo = "group_name=shardgroup2;deploy_as=standby;group_region=standby" - result = append(result, corev1.EnvVar{Name: "SHARD2_GROUP_PARAMS", Value: varinfo}) + + if strings.ToUpper(instance.Spec.GsmDevMode) != "FALSE" { + result = append(result, corev1.EnvVar{Name: "DEV_MODE", Value: "TRUE"}) + } + + if instance.Spec.InvitedNodeSubnetFlag == "" { + instance.Spec.InvitedNodeSubnetFlag = "TRUE" + + } + if strings.ToUpper(instance.Spec.InvitedNodeSubnetFlag) != "FALSE" { + result = append(result, corev1.EnvVar{Name: "INVITED_NODE_SUBNET_FLAG", Value: "TRUE"}) + if instance.Spec.InvitedNodeSubnet != "" { + result = append(result, corev1.EnvVar{Name: "INVITED_NODE_SUBNET", Value: 
instance.Spec.InvitedNodeSubnet}) } } if !catalogParams { @@ -260,7 +350,7 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da } // FUnction to build the svc definition for catalog/shard and GSM -func buildSvcPortsDef(instance *databasealphav1.ShardingDatabase, resType string) []corev1.ServicePort { +func buildSvcPortsDef(instance *databasev4.ShardingDatabase, resType string) []corev1.ServicePort { var result []corev1.ServicePort if len(instance.Spec.PortMappings) > 0 { for _, portMapping := range instance.Spec.PortMappings { @@ -302,12 +392,12 @@ func generateName(base string) string { } // Function to generate the port mapping -func generatePortMapping(portMapping databasealphav1.PortMapping) string { +func generatePortMapping(portMapping databasev4.PortMapping) string { return generateName(fmt.Sprintf("%s-%d-%d-", "tcp", portMapping.Port, portMapping.TargetPort)) } -func LogMessages(msgtype string, msg string, err error, instance *databasealphav1.ShardingDatabase, logger logr.Logger) { +func LogMessages(msgtype string, msg string, err error, instance *databasev4.ShardingDatabase, logger logr.Logger) { // setting logrus formatter //logrus.SetFormatter(&logrus.JSONFormatter{}) //logrus.SetOutput(os.Stdout) @@ -320,6 +410,8 @@ func LogMessages(msgtype string, msg string, err error, instance *databasealphav } } else if msgtype == "INFO" { logger.Info(msg) + } else if msgtype == "Error" { + logger.Error(err, msg) } } @@ -328,7 +420,7 @@ func GetGsmPodName(gsmName string) string { return podName } -func GetSidName(variables []databasealphav1.EnvironmentVariable, name string) string { +func GetSidName(variables []databasev4.EnvironmentVariable, name string) string { var result string for _, variable := range variables { @@ -342,7 +434,7 @@ func GetSidName(variables []databasealphav1.EnvironmentVariable, name string) st return result } -func GetPdbName(variables []databasealphav1.EnvironmentVariable, name string) string { +func 
GetPdbName(variables []databasev4.EnvironmentVariable, name string) string { var result string for _, variable := range variables { @@ -356,34 +448,34 @@ func GetPdbName(variables []databasealphav1.EnvironmentVariable, name string) st return result } -func getlabelsForGsm(instance *databasealphav1.ShardingDatabase) map[string]string { - return buildLabelsForGsm(instance, "sharding") +func getlabelsForGsm(instance *databasev4.ShardingDatabase) map[string]string { + return buildLabelsForGsm(instance, "sharding", "gsm") } -func getlabelsForShard(instance *databasealphav1.ShardingDatabase) map[string]string { - return buildLabelsForShard(instance, "sharding") +func getlabelsForShard(instance *databasev4.ShardingDatabase) map[string]string { + return buildLabelsForShard(instance, "sharding", "shard") } -func getlabelsForCatalog(instance *databasealphav1.ShardingDatabase) map[string]string { - return buildLabelsForCatalog(instance, "sharding") +func getlabelsForCatalog(instance *databasev4.ShardingDatabase) map[string]string { + return buildLabelsForCatalog(instance, "sharding", "catalog") } -func LabelsForProvShardKind(instance *databasealphav1.ShardingDatabase, sftype string, +func LabelsForProvShardKind(instance *databasev4.ShardingDatabase, sftype string, ) map[string]string { if sftype == "shard" { - return buildLabelsForShard(instance, "sharding") + return buildLabelsForShard(instance, "sharding", "shard") } return nil } -func CheckSfset(sfsetName string, instance *databasealphav1.ShardingDatabase, kClient client.Client) (*appsv1.StatefulSet, error) { +func CheckSfset(sfsetName string, instance *databasev4.ShardingDatabase, kClient client.Client) (*appsv1.StatefulSet, error) { sfSetFound := &appsv1.StatefulSet{} err := kClient.Get(context.TODO(), types.NamespacedName{ Name: sfsetName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, sfSetFound) if err != nil { return sfSetFound, err @@ -391,11 +483,11 @@ func CheckSfset(sfsetName string, 
instance *databasealphav1.ShardingDatabase, kC return sfSetFound, nil } -func checkPvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient client.Client) (*corev1.PersistentVolumeClaim, error) { +func checkPvc(pvcName string, instance *databasev4.ShardingDatabase, kClient client.Client) (*corev1.PersistentVolumeClaim, error) { pvcFound := &corev1.PersistentVolumeClaim{} err := kClient.Get(context.TODO(), types.NamespacedName{ Name: pvcName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, pvcFound) if err != nil { return pvcFound, err @@ -403,7 +495,7 @@ func checkPvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClien return pvcFound, nil } -func DelPvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger) error { +func DelPvc(pvcName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger) error { LogMessages("DEBUG", "Inside the delPvc and received param: "+GetFmtStr(pvcName), nil, instance, logger) pvcFound, err := checkPvc(pvcName, instance, kClient) @@ -419,7 +511,7 @@ func DelPvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient return nil } -func DelSvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger) error { +func DelSvc(pvcName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger) error { LogMessages("DEBUG", "Inside the delPvc and received param: "+GetFmtStr(pvcName), nil, instance, logger) pvcFound, err := checkPvc(pvcName, instance, kClient) @@ -435,11 +527,11 @@ func DelSvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient return nil } -func CheckSvc(svcName string, instance *databasealphav1.ShardingDatabase, kClient client.Client) (*corev1.Service, error) { +func CheckSvc(svcName string, instance *databasev4.ShardingDatabase, kClient client.Client) (*corev1.Service, error) { svcFound := 
&corev1.Service{} err := kClient.Get(context.TODO(), types.NamespacedName{ Name: svcName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, svcFound) if err != nil { return svcFound, err @@ -447,7 +539,7 @@ func CheckSvc(svcName string, instance *databasealphav1.ShardingDatabase, kClien return svcFound, nil } -func PodListValidation(podList *corev1.PodList, sfName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, +func PodListValidation(podList *corev1.PodList, sfName string, instance *databasev4.ShardingDatabase, kClient client.Client, ) (bool, *corev1.Pod) { var isPodExist bool = false @@ -483,22 +575,28 @@ func PodListValidation(podList *corev1.PodList, sfName string, instance *databas return isPodExist, podInfo } -func GetPodList(sfsetName string, resType string, instance *databasealphav1.ShardingDatabase, kClient client.Client, +func GetPodList(sfsetName string, resType string, instance *databasev4.ShardingDatabase, kClient client.Client, ) (*corev1.PodList, error) { podList := &corev1.PodList{} - labelSelector := labels.SelectorFromSet(getlabelsForGsm(instance)) - if resType == "GSM" { + //labelSelector := labels.SelectorFromSet(getlabelsForGsm(instance)) + //labelSelector := map[string]labels.Selector{} + var labelSelector labels.Selector + + //labels.SelectorFromSet() + + switch resType { + case "GSM": labelSelector = labels.SelectorFromSet(getlabelsForGsm(instance)) - } else if resType == "SHARD" { + case "SHARD": labelSelector = labels.SelectorFromSet(getlabelsForShard(instance)) - } else if resType == "CATALOG" { + case "CATALOG": labelSelector = labels.SelectorFromSet(getlabelsForCatalog(instance)) - } else { - err1 := fmt.Errorf("Wrong resources type passed. Supported values are SHARD,GSM and CATALOG") + default: + err1 := fmt.Errorf("wrong resources type passed. 
Supported values are SHARD,GSM and CATALOG") return nil, err1 } - listOps := &client.ListOptions{Namespace: instance.Spec.Namespace, LabelSelector: labelSelector} + listOps := &client.ListOptions{Namespace: instance.Namespace, LabelSelector: labelSelector} err := kClient.List(context.TODO(), podList, listOps) if err != nil { @@ -507,11 +605,11 @@ func GetPodList(sfsetName string, resType string, instance *databasealphav1.Shar return podList, nil } -func checkPod(instance *databasealphav1.ShardingDatabase, pod *corev1.Pod, kClient client.Client, +func checkPod(instance *databasev4.ShardingDatabase, pod *corev1.Pod, kClient client.Client, ) error { err := kClient.Get(context.TODO(), types.NamespacedName{ Name: pod.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, pod) if err != nil { @@ -567,15 +665,15 @@ func checkContainerStatus(pod *corev1.Pod, kClient client.Client, // Namespace related function -func AddNamespace(instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger, +func AddNamespace(instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger, ) error { var msg string ns := &corev1.Namespace{} - err := kClient.Get(context.TODO(), types.NamespacedName{Name: instance.Spec.Namespace}, ns) + err := kClient.Get(context.TODO(), types.NamespacedName{Name: instance.Namespace}, ns) if err != nil { - //msg = "Namespace " + instance.Spec.Namespace + " doesn't exist! creating namespace" + //msg = "Namespace " + instance.Namespace + " doesn't exist! creating namespace" if errors.IsNotFound(err) { - err = kClient.Create(context.TODO(), NewNamespace(instance.Spec.Namespace)) + err = kClient.Create(context.TODO(), NewNamespace(instance.Namespace)) if err != nil { msg = "Error in creating namespace!" 
LogMessages("Error", msg, nil, instance, logger) @@ -603,7 +701,7 @@ func NewNamespace(name string) *corev1.Namespace { } } -func getOwnerRef(instance *databasealphav1.ShardingDatabase, +func getOwnerRef(instance *databasev4.ShardingDatabase, ) []metav1.OwnerReference { var ownerRef []metav1.OwnerReference @@ -611,42 +709,92 @@ func getOwnerRef(instance *databasealphav1.ShardingDatabase, return ownerRef } -func buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { - var variables []databasealphav1.EnvironmentVariable - variables = instance.Spec.Catalog[0].EnvVars +func buildCatalogParams(instance *databasev4.ShardingDatabase) string { + var variables []databasev4.EnvironmentVariable = instance.Spec.Catalog[0].EnvVars var result string var varinfo string var sidFlag bool = false var pdbFlag bool = false var portFlag bool = false - var regionFlag bool = false var cnameFlag bool = false var chunksFlag bool = false var sidName string var pdbName string var cport string - var cregion string var cname string var catchunks string + var catalog_region, shard_space string result = "catalog_host=" + instance.Spec.Catalog[0].Name + "-0" + "." + instance.Spec.Catalog[0].Name + ";" + + //Checking if replcia type set to native + var sspace_arr []string + if strings.ToUpper(instance.Spec.ShardingType) == "USER" { + shard_space = "" + result = result + "sharding_type=user;" + for i := 0; i < len(instance.Spec.Shard); i++ { + sspace_arr = append(sspace_arr, instance.Spec.Shard[i].ShardSpace) + } + slices.Sort(sspace_arr) + sspace_arr = slices.Compact(sspace_arr) //[a b c d] + for i := 0; i < len(sspace_arr); i++ { + shard_space = shard_space + sspace_arr[i] + "," + } + shard_space = strings.TrimSuffix(shard_space, ",") + result = result + "shard_space=" + shard_space + ";" + } else if strings.ToUpper(instance.Spec.ReplicationType) == "NATIVE" { + result = result + "repl_type=native;" + } else { + fmt.Fprintln(os.Stdout, []any{""}...) 
+ } + + var region_arr []string + for i := 0; i < len(instance.Spec.Shard); i++ { + region_arr = append(region_arr, instance.Spec.Shard[i].ShardRegion) + } + + for i := 0; i < len(instance.Spec.Gsm); i++ { + region_arr = append(region_arr, instance.Spec.Gsm[i].Region) + } + + slices.Sort(region_arr) + region_arr = slices.Compact(region_arr) //[a b c d] + for i := 0; i < len(region_arr); i++ { + catalog_region = catalog_region + region_arr[i] + "," + } + catalog_region = strings.TrimSuffix(catalog_region, ",") + result = result + "catalog_region=" + catalog_region + ";" + + if len(instance.Spec.ShardConfigName) != 0 { + result = result + "shard_configname=" + instance.Spec.ShardConfigName + ";" + } + for _, variable := range variables { - if variable.Name == "ORACLE_SID" { + if variable.Name == "DB_UNIQUE_NAME" { sidFlag = true sidName = variable.Value + } else { + if variable.Name == "ORACLE_SID" { + sidFlag = true + sidName = variable.Value + } } - if variable.Name == "ORACLE_PDB" { - pdbFlag = true - pdbName = variable.Value + if variable.Name == "ORACLE_FREE_PDB" { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + pdbFlag = true + pdbName = variable.Value + } + } + if strings.ToLower(instance.Spec.DbEdition) != "free" { + if variable.Name == "ORACLE_PDB" { + pdbFlag = true + pdbName = variable.Value + } } if variable.Name == "CATALOG_PORT" { portFlag = true cport = variable.Value } - if variable.Name == "CATALOG_REGION" { - regionFlag = true - cregion = variable.Value - } if variable.Name == "CATALOG_NAME" { cnameFlag = true cname = variable.Value @@ -655,22 +803,33 @@ func buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { chunksFlag = true catchunks = variable.Value } + } if !sidFlag { varinfo = "catalog_db=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + ";" result = result + varinfo } else { - varinfo = "catalog_db=" + strings.ToUpper(sidName) + ";" - result = result + varinfo + if strings.ToLower(instance.Spec.DbEdition) 
== "free" { + varinfo = "catalog_db=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + ";" + result = result + varinfo + } else { + varinfo = "catalog_db=" + strings.ToUpper(sidName) + ";" + result = result + varinfo + } } if !pdbFlag { varinfo = "catalog_pdb=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + "PDB" + ";" result = result + varinfo } else { - varinfo = "catalog_pdb=" + strings.ToUpper(pdbName) + ";" - result = result + varinfo + if strings.ToLower(instance.Spec.DbEdition) == "free" { + varinfo = "catalog_pdb=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + "PDB" + ";" + result = result + varinfo + } else { + varinfo = "catalog_pdb=" + strings.ToUpper(pdbName) + ";" + result = result + varinfo + } } if !portFlag { @@ -688,23 +847,20 @@ func buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { varinfo = "catalog_name=" + strings.ToUpper(cname) + ";" result = result + varinfo } + if chunksFlag { result = result + "catalog_chunks=" + catchunks + ";" - } - - if !regionFlag { - varinfo = "catalog_region=primary,standby" - result = result + varinfo } else { - varinfo = "catalog_region=" + cregion - result = result + varinfo + if strings.ToLower(instance.Spec.DbEdition) == "free" && strings.ToUpper(instance.Spec.ShardingType) != "USER" && strings.ToUpper(instance.Spec.ShardingType) != "NATIVE" { + result = result + "catalog_chunks=12;" + } } - + result = strings.TrimSuffix(result, ";") return result } -func buildDirectorParams(instance *databasealphav1.ShardingDatabase, oraGsmSpex databasealphav1.GsmSpec, idx int) string { - var variables []databasealphav1.EnvironmentVariable +func buildDirectorParams(instance *databasev4.ShardingDatabase, oraGsmSpex databasev4.GsmSpec, idx int) string { + var variables []databasev4.EnvironmentVariable var result string var varinfo string var dnameFlag bool = false @@ -725,72 +881,145 @@ func buildDirectorParams(instance *databasealphav1.ShardingDatabase, oraGsmSpex result = result + varinfo } - if 
idx == 0 { - varinfo = "director_region=primary;" - result = result + varinfo - } else if idx == 1 { - varinfo = "director_region=standby;" + if oraGsmSpex.Region != "" { + varinfo = "director_region=" + oraGsmSpex.Region + ";" result = result + varinfo } else { - // Do nothing + switch idx { + case 0: + varinfo = "director_region=primary;" + result = result + varinfo + case 1: + varinfo = "director_region=standby;" + result = result + varinfo + default: + // Do nothing + } + result = result + varinfo } if !dportFlag { varinfo = "director_port=1522" result = result + varinfo } - + result = strings.TrimSuffix(result, ";") return result } -func BuildShardParams(sfSet *appsv1.StatefulSet) string { - var variables []corev1.EnvVar - variables = sfSet.Spec.Template.Spec.Containers[0].Env +func BuildShardParams(instance *databasev4.ShardingDatabase, sfSet *appsv1.StatefulSet, OraShardSpex databasev4.ShardSpec) string { + var variables []corev1.EnvVar = sfSet.Spec.Template.Spec.Containers[0].Env var result string var varinfo string var isShardPort bool = false - var isShardGrp bool = false + var freePdbFlag bool = false + var freePdbValue string + var pdbFlag bool = false + var pdbValue string + var dbUnameFlag bool = false + var sidFlag bool = false + var dbUname string + var sidName string + + //var isShardGrp bool = false + //var i int32 + //var isShardSpace bool = false + //var isShardRegion bool = false result = "shard_host=" + sfSet.Name + "-0" + "." 
+ sfSet.Name + ";" for _, variable := range variables { - if variable.Name == "ORACLE_SID" { - varinfo = "shard_db=" + variable.Value + ";" - result = result + varinfo + if variable.Name == "DB_UNIQUE_NAME" { + dbUnameFlag = true + dbUname = variable.Value + } else { + if variable.Name == "ORACLE_SID" { + sidFlag = true + sidName = variable.Value + } + } + if variable.Name == "ORACLE_FREE_PDB" { + freePdbFlag = true + freePdbValue = variable.Value } + if variable.Name == "ORACLE_PDB" { - varinfo = "shard_pdb=" + variable.Value + ";" - result = result + varinfo + pdbFlag = true + pdbValue = variable.Value } + if variable.Name == "SHARD_PORT" { varinfo = "shard_port=" + variable.Value + ";" result = result + varinfo isShardPort = true } - if variable.Name == "SHARD_GROUP" { - varinfo = "shard_group=" + variable.Value + ";" + + } + + if dbUnameFlag { + varinfo = "shard_db=" + dbUname + ";" + result = result + varinfo + } + + if sidFlag && !dbUnameFlag { + if strings.ToLower(instance.Spec.DbEdition) != "free" { + varinfo = "shard_db=" + sidName + ";" + result = result + varinfo + } else { + varinfo = "shard_db=" + sfSet.Name + ";" result = result + varinfo - isShardGrp = true } } - if !isShardPort { - varinfo = "shard_port=" + "1521" + ";" + if !sidFlag && !dbUnameFlag { + if strings.ToLower(instance.Spec.DbEdition) != "free" { + varinfo = "shard_db=" + sfSet.Name + ";" + result = result + varinfo + } + } + + if freePdbFlag { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + varinfo = "shard_pdb=" + freePdbValue + ";" + result = result + varinfo + } + } else { + if pdbFlag { + varinfo = "shard_pdb=" + pdbValue + ";" + result = result + varinfo + } + } + + if OraShardSpex.ShardGroup != "" { + varinfo = "shard_group=" + OraShardSpex.ShardGroup + ";" result = result + varinfo } - if !isShardGrp { - varinfo = "shard_group=" + "shardgroup1" + if OraShardSpex.ShardSpace != "" { + varinfo = "shard_space=" + OraShardSpex.ShardSpace + ";" + result = result + varinfo + 
} + if OraShardSpex.ShardRegion != "" { + varinfo = "shard_region=" + OraShardSpex.ShardRegion + ";" result = result + varinfo } + if OraShardSpex.DeployAs != "" { + varinfo = "deploy_as=" + OraShardSpex.DeployAs + ";" + result = result + varinfo + } + + if !isShardPort { + varinfo = "shard_port=" + "1521" + ";" + result = result + varinfo + } + result = strings.TrimSuffix(result, ";") return result } -func labelsForShardingDatabaseKind(instance *databasealphav1.ShardingDatabase, sftype string, +func labelsForShardingDatabaseKind(instance *databasev4.ShardingDatabase, sftype string, ) map[string]string { if sftype == "shard" { - return buildLabelsForShard(instance, "sharding") + return buildLabelsForShard(instance, "sharding", "shard") } return nil @@ -836,32 +1065,28 @@ func GetShardInviteNodeCmd(shardName string) []string { } func getCancelChunksCmd(sparamStr string) []string { - var cancelChunkCmd []string - cancelChunkCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--cancelchunks=" + strconv.Quote(sparamStr), "--optype=gsm"} + var cancelChunkCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--cancelchunks=" + strconv.Quote(sparamStr), "--optype=gsm"} return cancelChunkCmd } func getMoveChunksCmd(sparamStr string) []string { - var moveChunkCmd []string - moveChunkCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--movechunks=" + strconv.Quote(sparamStr), "--optype=gsm"} + var moveChunkCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--movechunks=" + strconv.Quote(sparamStr), "--optype=gsm"} return moveChunkCmd } func getNoChunksCmd(sparamStr string) []string { - var noChunkCmd []string - noChunkCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--validatenochunks=" + strconv.Quote(sparamStr), "--optype=gsm"} + var noChunkCmd []string = 
[]string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--validatenochunks=" + strconv.Quote(sparamStr), "--optype=gsm"} return noChunkCmd } func shardValidationCmd() []string { - var oraShardValidateCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkliveness=true ", "--optype=primaryshard"} + var oraShardValidateCmd = []string{oraDbScriptMount + "/cmdExec", "/bin/python", oraDbScriptMount + "/main.py ", "--checkliveness=true ", "--optype=primaryshard"} return oraShardValidateCmd } func getShardCheckCmd(sparamStr string) []string { - var checkShardCmd []string - checkShardCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkgsmshard=" + strconv.Quote(sparamStr), "--optype=gsm"} + var checkShardCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkgsmshard=" + strconv.Quote(sparamStr), "--optype=gsm"} return checkShardCmd } @@ -882,47 +1107,64 @@ func getShardDelCmd(sparams string) []string { func getLivenessCmd(resType string) []string { var livenessCmd []string if resType == "SHARD" { - livenessCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkliveness=true", "--optype=primaryshard"} + livenessCmd = []string{oraDbScriptMount + "/cmdExec", "/bin/python", oraDbScriptMount + "/main.py ", "--checkliveness=true", "--optype=primaryshard"} } if resType == "CATALOG" { - livenessCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkliveness=true", "--optype=catalog"} + livenessCmd = []string{oraDbScriptMount + "/cmdExec", "/bin/python", oraDbScriptMount + "/main.py ", "--checkliveness=true", "--optype=catalog"} } if resType == "GSM" { livenessCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkliveness=true", "--optype=gsm"} } if resType == "STANDBY" { - livenessCmd = 
[]string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkliveness=true", "--optype=standbyshard"} + livenessCmd = []string{oraDbScriptMount + "/cmdExec", "/bin/python", oraDbScriptMount + "/main.py ", "--checkliveness=true", "--optype=standbyshard"} } return livenessCmd } +func getReadinessCmd(resType string) []string { + var readynessCmd []string + if resType == "SHARD" { + readynessCmd = []string{oraDbScriptMount + "/cmdExec", "/bin/python", oraDbScriptMount + "/main.py ", "--checkreadyness=true", "--optype=primaryshard"} + } + if resType == "CATALOG" { + readynessCmd = []string{oraDbScriptMount + "/cmdExec", "/bin/python", oraDbScriptMount + "/main.py ", "--checkreadyness=true", "--optype=catalog"} + } + if resType == "GSM" { + readynessCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkreadyness=true", "--optype=gsm"} + } + if resType == "STANDBY" { + readynessCmd = []string{oraDbScriptMount + "/cmdExec", "/bin/python", oraDbScriptMount + "/main.py ", "--checkreadyness=true", "--optype=standbyshard"} + } + return readynessCmd +} + func getGsmShardValidateCmd(shardName string) []string { - var validateCmd []string - validateCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--validateshard=" + strconv.Quote(shardName), "--optype=gsm"} + var validateCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--validateshard=" + strconv.Quote(shardName), "--optype=gsm"} return validateCmd } +func GetTdeKeyLocCmd() []string { + var tdeKeyCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--gettdekey=true", "--optype=gsm"} + return tdeKeyCmd +} + func getOnlineShardCmd(sparamStr string) []string { - var onlineCmd []string - onlineCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkonlineshard=" + 
strconv.Quote(sparamStr), "--optype=gsm"} + var onlineCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkonlineshard=" + strconv.Quote(sparamStr), "--optype=gsm"} return onlineCmd } func getGsmAddShardGroupCmd(sparamStr string) []string { - var addSgroupCmd []string - addSgroupCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", sparamStr, "--optype=gsm"} + var addSgroupCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", sparamStr, "--optype=gsm"} return addSgroupCmd } func getdeployShardCmd() []string { - var depCmd []string - depCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--deployshard=true", "--optype=gsm"} + var depCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--deployshard=true", "--optype=gsm"} return depCmd } func getGsmvalidateCmd() []string { - var depCmd []string - depCmd = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkliveness=true", "--optype=gsm"} + var depCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--checkliveness=true", "--optype=gsm"} return depCmd } @@ -930,9 +1172,9 @@ func getInitContainerCmd(resType string, name string, ) string { var initCmd string if resType == "WEB" { - initCmd = "chown -R 54321:54321 " + oraScriptMount + ";chmod 755 " + oraScriptMount + "/*;chown -R 54321:54321 /opt/oracle/oradata;chmod 750 /opt/oracle/oradata" + initCmd = "chown -R 54321:54321 " + oraDbScriptMount + ";chmod 755 " + oraDbScriptMount + "/*;chown -R 54321:54321 /opt/oracle/oradata;chmod 750 /opt/oracle/oradata" } else { - initCmd = resType + ";chown -R 54321:54321 " + oraScriptMount + ";chmod 755 " + oraScriptMount + "/*;chown -R 54321:54321 /opt/oracle/oradata;chmod 750 /opt/oracle/oradata" + initCmd = resType + ";chown -R 
54321:54321 " + oraDbScriptMount + ";chmod 755 " + oraDbScriptMount + "/*;chown -R 54321:54321 /opt/oracle/oradata;chmod 750 /opt/oracle/oradata" } return initCmd } @@ -948,22 +1190,28 @@ func getGsmInitContainerCmd(resType string, name string, return initCmd } +func getResetPasswdCmd(sparamStr string) []string { + var resetPasswdCmd []string = []string{oraScriptMount + "/cmdExec", "/bin/python", oraScriptMount + "/main.py ", "--resetpassword=true"} + return resetPasswdCmd +} + func GetFmtStr(pstr string, ) string { return "[" + pstr + "]" } -func ReadConfigMap(cmName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger, +func ReadConfigMap(cmName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger, ) (string, string, string, string, string, string) { var region, fingerprint, user, tenancy, passphrase, str1, topicid, k, value string - cm := &corev1.ConfigMap{} var err error + cm := &corev1.ConfigMap{} + //var err error // Reding a config map err = kClient.Get(context.TODO(), types.NamespacedName{ Name: cmName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, cm) if err != nil { @@ -986,36 +1234,37 @@ func ReadConfigMap(cmName string, instance *databasealphav1.ShardingDatabase, kC value = line[s+1:] LogMessages("DEBUG", "Key : "+GetFmtStr(k)+" Value : "+GetFmtStr(value), nil, instance, logger) - if k == "region" { + switch k { + case "region": region = value - } else if k == "fingerprint" { + case "fingerprint": fingerprint = value - } else if k == "user" { + case "user": user = value - } else if k == "tenancy" { + case "tenancy": tenancy = value - } else if k == "passpharase" { + case "passpharase": passphrase = value - } else if k == "topicid" { + case "topicid": topicid = value - } else { + default: LogMessages("DEBUG", GetFmtStr(k)+" is not matching with any required value for ONS.", nil, instance, logger) } } return region, user, tenancy, passphrase, 
fingerprint, topicid } -func ReadSecret(secName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger, +func ReadSecret(secName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger, ) string { var value string sc := &corev1.Secret{} - var err error + //var err error // Reading a Secret - err = kClient.Get(context.TODO(), types.NamespacedName{ + var err error = kClient.Get(context.TODO(), types.NamespacedName{ Name: secName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, sc) if err != nil { @@ -1037,20 +1286,17 @@ func GetK8sClientConfig(kClient client.Client) (clientcmd.ClientConfig, kubernet var kubeConfig clientcmd.ClientConfig var kubeClient kubernetes.Interface - databasealphav1.KubeConfigOnce.Do(func() { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - configOverrides := &clientcmd.ConfigOverrides{} - kubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - config, err := kubeConfig.ClientConfig() - if err != nil { - err1 = err - } - kubeClient, err = kubernetes.NewForConfig(config) - if err != nil { - err1 = err - } - - }) + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + configOverrides := &clientcmd.ConfigOverrides{} + kubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + config, err := kubeConfig.ClientConfig() + if err != nil { + err1 = err + } + kubeClient, err = kubernetes.NewForConfig(config) + if err != nil { + err1 = err + } return kubeConfig, kubeClient, err1 } @@ -1064,10 +1310,10 @@ func Contains(list []string, s string) bool { } // Function to check shadrd in GSM -func CheckShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func CheckShardInGsm(gsmPodName string, sparams string, instance 
*databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getShardCheckCmd(sparams), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getShardCheckCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Did not find the shard " + GetFmtStr(sparams) + " in GSM." LogMessages("INFO", msg, nil, instance, logger) @@ -1077,12 +1323,12 @@ func CheckShardInGsm(gsmPodName string, sparams string, instance *databasealphav } // Function to check the online Shard -func CheckOnlineShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func CheckOnlineShardInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getOnlineShardCmd(sparams), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getOnlineShardCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { - msg := "Shard: " + GetFmtStr(sparams) + " is not onine in GSM." + msg := "Shard: " + GetFmtStr(sparams) + " is not online in GSM." 
LogMessages("INFO", msg, nil, instance, logger) return err } @@ -1090,10 +1336,10 @@ func CheckOnlineShardInGsm(gsmPodName string, sparams string, instance *database } // Function to move the chunks -func MoveChunks(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func MoveChunks(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getMoveChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getMoveChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred in during Chunk movement command submission for shard: " + GetFmtStr(sparams) + " in GSM." LogMessages("INFO", msg, nil, instance, logger) @@ -1103,9 +1349,9 @@ func MoveChunks(gsmPodName string, sparams string, instance *databasealphav1.Sha } // Function to verify the chunks -func VerifyChunks(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func VerifyChunks(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getNoChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getNoChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Chunks are not moved completely from the shard: " + GetFmtStr(sparams) + " in GSM." 
LogMessages("INFO", msg, nil, instance, logger) @@ -1115,9 +1361,9 @@ func VerifyChunks(gsmPodName string, sparams string, instance *databasealphav1.S } // Function to verify the chunks -func AddShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func AddShardInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getShardAddCmd(sparams), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getShardAddCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while adding a shard " + GetFmtStr(sparams) + " in GSM." LogMessages("INFO", msg, nil, instance, logger) @@ -1127,9 +1373,9 @@ func AddShardInGsm(gsmPodName string, sparams string, instance *databasealphav1. } // Function to deploy the Shards -func DeployShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func DeployShardInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getdeployShardCmd(), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getdeployShardCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while deploying the shard in GSM." 
LogMessages("INFO", msg, nil, instance, logger) @@ -1139,9 +1385,9 @@ func DeployShardInGsm(gsmPodName string, sparams string, instance *databasealpha } // Function to verify the chunks -func CancelChunksInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func CancelChunksInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getCancelChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getCancelChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while cancelling the chunks: " + GetFmtStr(sparams) + " in GSM." LogMessages("INFO", msg, nil, instance, logger) @@ -1151,9 +1397,9 @@ func CancelChunksInGsm(gsmPodName string, sparams string, instance *databasealph } // Function to delete the shard -func RemoveShardFromGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func RemoveShardFromGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { - err, _, _ := ExecCommand(gsmPodName, getShardDelCmd(sparams), kubeClient, kubeconfig, instance, logger) + _, _, err := ExecCommand(gsmPodName, getShardDelCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while cancelling the chunks: " + GetFmtStr(sparams) + " in GSM." 
LogMessages("INFO", msg, nil, instance, logger) @@ -1162,20 +1408,20 @@ func RemoveShardFromGsm(gsmPodName string, sparams string, instance *databasealp return nil } -func GetSvcIp(PodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, -) (error, string, string) { - err, stdoutput, stderror := ExecCommand(PodName, GetIpCmd(sparams), kubeClient, kubeconfig, instance, logger) +func GetSvcIp(PodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +) (string, string, error) { + stdoutput, stderror, err := ExecCommand(PodName, GetIpCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while getting the IP for k8s service " + GetFmtStr(sparams) LogMessages("INFO", msg, nil, instance, logger) - return err, strings.Replace(stdoutput, "\r\n", "", -1), strings.Replace(stderror, "/r/n", "", -1) + return strings.Replace(stdoutput, "\r\n", "", -1), strings.Replace(stderror, "/r/n", "", -1), err } - return nil, strings.Replace(stdoutput, "\r\n", "", -1), strings.Replace(stderror, "/r/n", "", -1) + return strings.Replace(stdoutput, "\r\n", "", -1), strings.Replace(stderror, "/r/n", "", -1), nil } -func GetGsmServices(PodName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func GetGsmServices(PodName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) string { - err, stdoutput, _ := ExecCommand(PodName, getGsmSvcCmd(), kubeClient, kubeconfig, instance, logger) + stdoutput, _, err := ExecCommand(PodName, getGsmSvcCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while getting the services from the GSM " 
LogMessages("DEBUG", msg, err, instance, logger) @@ -1184,9 +1430,9 @@ func GetGsmServices(PodName string, instance *databasealphav1.ShardingDatabase, return stdoutput } -func GetDbRole(PodName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func GetDbRole(PodName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) string { - err, stdoutput, _ := ExecCommand(PodName, getDbRoleCmd(), kubeClient, kubeconfig, instance, logger) + stdoutput, _, err := ExecCommand(PodName, getDbRoleCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while getting the DB role from the database" LogMessages("DEBUG", msg, err, instance, logger) @@ -1195,9 +1441,9 @@ func GetDbRole(PodName string, instance *databasealphav1.ShardingDatabase, kubeC return strings.TrimSpace(stdoutput) } -func GetDbOpenMode(PodName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func GetDbOpenMode(PodName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) string { - err, stdoutput, _ := ExecCommand(PodName, getDbModeCmd(), kubeClient, kubeconfig, instance, logger) + stdoutput, _, err := ExecCommand(PodName, getDbModeCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { msg := "Error occurred while getting the DB mode from the database" LogMessages("DEBUG", msg, err, instance, logger) @@ -1206,7 +1452,7 @@ func GetDbOpenMode(PodName string, instance *databasealphav1.ShardingDatabase, k return strings.TrimSpace(stdoutput) } -func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, instance *databasealphav1.ShardingDatabase, kClient client.Client, +func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, 
sfSetPod *corev1.Pod, instance *databasev4.ShardingDatabase, kClient client.Client, ) error { //var msg string @@ -1214,7 +1460,7 @@ func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, insta var err error sfsetCopy := sfSetFound.DeepCopy() - sfsetCopy.Labels[string(databasealphav1.ShardingDelLabelKey)] = string(databasealphav1.ShardingDelLabelTrueValue) + sfsetCopy.Labels[string(databasev4.ShardingDelLabelKey)] = string(databasev4.ShardingDelLabelTrueValue) patch := client.MergeFrom(sfSetFound) err = kClient.Patch(context.Background(), sfsetCopy, patch) if err != nil { @@ -1222,7 +1468,7 @@ func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, insta } podCopy := sfSetPod.DeepCopy() - podCopy.Labels[string(databasealphav1.ShardingDelLabelKey)] = string(databasealphav1.ShardingDelLabelTrueValue) + podCopy.Labels[string(databasev4.ShardingDelLabelKey)] = string(databasev4.ShardingDelLabelTrueValue) podPatch := client.MergeFrom(sfSetPod.DeepCopy()) err = kClient.Patch(context.Background(), podCopy, podPatch) if err != nil { @@ -1232,9 +1478,31 @@ func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, insta return nil } +func InstanceShardPatch(obj client.Object, instance *databasev4.ShardingDatabase, kClient client.Client, id int32, field string, value string, +) error { + + var err error + instSpec := instance.Spec + instSpec.Shard[id].IsDelete = "failed" + instshardM, _ := json.Marshal(struct { + Spec *databasev4.ShardingDatabaseSpec `json:"spec"` + }{ + Spec: &instSpec, + }) + + patch1 := client.RawPatch(types.MergePatchType, instshardM) + err = kClient.Patch(context.TODO(), obj, patch1) + + if err != nil { + return err + } + + return err +} + // Send Notification -func SendNotification(title string, body string, instance 
*databasev4.ShardingDatabase, topicId string, rclient ons.NotificationDataPlaneClient, logger logr.Logger, ) { var msg string req := ons.PublishMessageRequest{TopicId: common.String(topicId), @@ -1250,3 +1518,31 @@ func SendNotification(title string, body string, instance *databasealphav1.Shard LogMessages("DEBUG", msg, nil, instance, logger) } } + +func GetSecretMount() string { + return oraSecretMount +} + +func checkTdeWalletFlag(instance *databasev4.ShardingDatabase) bool { + if strings.ToLower(instance.Spec.IsTdeWallet) == "enable" { + return true + } + return false +} + +func CheckIsDeleteFlag(delStr string, instance *databasev4.ShardingDatabase, logger logr.Logger) bool { + if strings.ToLower(delStr) == "enable" { + return true + } + if strings.ToLower(delStr) == "failed" { + // LogMessages("INFO", "manual intervention required", nil, instance, logger) + } + return false +} + +func getTdeWalletMountLoc(instance *databasev4.ShardingDatabase) string { + if len(instance.Spec.TdeWalletPvcMountLocation) > 0 { + return instance.Spec.TdeWalletPvcMountLocation + } + return "/tdewallet/" + instance.Name +} diff --git a/commons/sharding/shard.go b/commons/sharding/shard.go index 08ba490f..e48b56dd 100644 --- a/commons/sharding/shard.go +++ b/commons/sharding/shard.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,10 +40,11 @@ package commons import ( "context" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" "reflect" "strconv" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -53,7 +54,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func buildLabelsForShard(instance *databasev1alpha1.ShardingDatabase, label string) map[string]string { +func buildLabelsForShard(instance *databasev4.ShardingDatabase, label string, shardName string) map[string]string { return map[string]string{ "app": "OracleSharding", "type": "Shard", @@ -61,7 +62,7 @@ func buildLabelsForShard(instance *databasev1alpha1.ShardingDatabase, label stri } } -func getLabelForShard(instance *databasev1alpha1.ShardingDatabase) string { +func getLabelForShard(instance *databasev4.ShardingDatabase) string { // if len(OraShardSpex.Label) !=0 { // return OraShardSpex.Label @@ -70,7 +71,7 @@ func getLabelForShard(instance *databasev1alpha1.ShardingDatabase) string { return instance.Name } -func BuildStatefulSetForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) *appsv1.StatefulSet { +func BuildStatefulSetForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) *appsv1.StatefulSet { sfset := &appsv1.StatefulSet{ TypeMeta: buildTypeMetaForShard(), ObjectMeta: builObjectMetaForShard(instance, OraShardSpex), @@ -91,30 +92,29 @@ func buildTypeMetaForShard() metav1.TypeMeta { } // Function to build ObjectMeta -func builObjectMetaForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) metav1.ObjectMeta { +func builObjectMetaForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) metav1.ObjectMeta { // building objectMeta objmeta := metav1.ObjectMeta{ Name: 
OraShardSpex.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForShard(instance, "sharding"), + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), } return objmeta } // Function to build Stateful Specs -func buildStatefulSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) *appsv1.StatefulSetSpec { +func buildStatefulSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) *appsv1.StatefulSetSpec { // building Stateful set Specs - var size int32 - size = 1 + var size int32 = 1 sfsetspec := &appsv1.StatefulSetSpec{ ServiceName: OraShardSpex.Name, Selector: &metav1.LabelSelector{ - MatchLabels: buildLabelsForShard(instance, "sharding"), + MatchLabels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: buildLabelsForShard(instance, "sharding"), + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), }, Spec: *buildPodSpecForShard(instance, OraShardSpex), }, @@ -132,7 +132,7 @@ func buildStatefulSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraS // Function to build PodSpec -func buildPodSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) *corev1.PodSpec { +func buildPodSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) *corev1.PodSpec { user := oraRunAsUser group := oraFsGroup @@ -141,10 +141,14 @@ func buildPodSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardS RunAsUser: &user, FSGroup: &group, }, - InitContainers: buildInitContainerSpecForShard(instance, OraShardSpex), - Containers: buildContainerSpecForShard(instance, OraShardSpex), - Volumes: buildVolumeSpecForShard(instance, OraShardSpex), + Containers: buildContainerSpecForShard(instance, OraShardSpex), + Volumes: 
buildVolumeSpecForShard(instance, OraShardSpex), + } + + if (instance.Spec.IsDownloadScripts) && (instance.Spec.ScriptsLocation != "") { + spec.InitContainers = buildInitContainerSpecForShard(instance, OraShardSpex) } + if len(instance.Spec.DbImagePullSecret) > 0 { spec.ImagePullSecrets = []corev1.LocalObjectReference{ { @@ -164,23 +168,17 @@ func buildPodSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardS } // Function to build Volume Spec -func buildVolumeSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.Volume { +func buildVolumeSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.Volume { var result []corev1.Volume result = []corev1.Volume{ { Name: OraShardSpex.Name + "secretmap-vol3", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: instance.Spec.Secret, + SecretName: instance.Spec.DbSecret.Name, }, }, }, - { - Name: OraShardSpex.Name + "orascript-vol5", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, { Name: OraShardSpex.Name + "oradshm-vol6", VolumeSource: corev1.VolumeSource{ @@ -196,12 +194,21 @@ func buildVolumeSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraSha if len(instance.Spec.StagePvcName) != 0 { result = append(result, corev1.Volume{Name: OraShardSpex.Name + "orastage-vol7", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: instance.Spec.StagePvcName}}}) } + if instance.Spec.IsDownloadScripts { + result = append(result, corev1.Volume{Name: OraShardSpex.Name + "orascript-vol5", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}) + } + + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.Volume{Name: OraShardSpex.Name + "shared-storage-vol8", VolumeSource: 
corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: instance.Spec.TdeWalletPvc}}}) + } + } return result } // Function to build the container Specification -func buildContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.Container { +func buildContainerSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.Container { // building Continer spec var result []corev1.Container containerSpec := corev1.Container{ @@ -209,7 +216,7 @@ func buildContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, Ora Image: instance.Spec.DbImage, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_RAW"}, + Add: []corev1.Capability{corev1.Capability("NET_ADMIN"), corev1.Capability("SYS_NICE")}, }, }, Resources: corev1.ResourceRequirements{ @@ -218,28 +225,50 @@ func buildContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, Ora VolumeMounts: buildVolumeMountSpecForShard(instance, OraShardSpex), LivenessProbe: &corev1.Probe{ // TODO: Investigate if it's ok to call status every 10 seconds - FailureThreshold: int32(30), - PeriodSeconds: int32(240), - InitialDelaySeconds: int32(300), - TimeoutSeconds: int32(120), - Handler: corev1.Handler{ + FailureThreshold: int32(3), + InitialDelaySeconds: int32(30), + PeriodSeconds: func() int32 { + if instance.Spec.LivenessCheckPeriod > 0 { + return int32(instance.Spec.LivenessCheckPeriod) + } + return 60 + }(), + TimeoutSeconds: int32(30), + ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ - Command: getLivenessCmd("SHARD"), + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, }, }, }, /** + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + //Command: 
getReadinessCmd("SHARD"), + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, + }, + }, + InitialDelaySeconds: 20, + TimeoutSeconds: 20, + PeriodSeconds: func() int32 { + if instance.Spec.ReadinessCheckPeriod > 0 { + return int32(instance.Spec.ReadinessCheckPeriod) + } + return 60 + }(), + }, + **/ // Disabling this because ping stop working and sharding topologu never gets configured. StartupProbe: &corev1.Probe{ - FailureThreshold: int32(30), - PeriodSeconds: int32(180), - Handler: corev1.Handler{ + FailureThreshold: int32(30), + PeriodSeconds: int32(180), + InitialDelaySeconds: int32(30), + ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ - Command: getLivenessCmd("SHARD"), + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, }, }, }, - **/ Env: buildEnvVarsSpec(instance, OraShardSpex.EnvVars, OraShardSpex.Name, "SHARD", false, "NONE"), } @@ -257,10 +286,10 @@ func buildContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, Ora return result } -//Function to build the init Container Spec -func buildInitContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.Container { +// Function to build the init Container Spec +func buildInitContainerSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.Container { var result []corev1.Container - privFlag := true + privFlag := false var uid int64 = 0 // building the init Container Spec @@ -296,21 +325,33 @@ func buildInitContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, return result } -func buildVolumeMountSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.VolumeMount { +func 
buildVolumeMountSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.VolumeMount { var result []corev1.VolumeMount result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "secretmap-vol3", MountPath: oraSecretMount, ReadOnly: true}) result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "-oradata-vol4", MountPath: oraDataMount}) - result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "orascript-vol5", MountPath: oraScriptMount}) + if instance.Spec.IsDownloadScripts { + result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "orascript-vol5", MountPath: oraDbScriptMount}) + } result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "oradshm-vol6", MountPath: oraShm}) if len(instance.Spec.StagePvcName) != 0 { result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "orastage-vol7", MountPath: oraStage}) } + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) > 0 && len(instance.Spec.TdeWalletPvc) == 0 { + result = append(result, corev1.VolumeMount{Name: instance.Name + "shared-storage" + instance.Spec.Catalog[0].Name + "-0", MountPath: getTdeWalletMountLoc(instance)}) + } else { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "shared-storage-vol8", MountPath: getTdeWalletMountLoc(instance)}) + } + } + } + return result } -func volumeClaimTemplatesForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.PersistentVolumeClaim { +func volumeClaimTemplatesForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.PersistentVolumeClaim { var claims []corev1.PersistentVolumeClaim @@ -322,16 +363,16 @@ func volumeClaimTemplatesForShard(instance *databasev1alpha1.ShardingDatabase, O { ObjectMeta: metav1.ObjectMeta{ Name: OraShardSpex.Name + 
"-oradata-vol4", - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForShard(instance, "sharding"), + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteOnce, }, StorageClassName: &instance.Spec.StorageClass, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse(strconv.FormatInt(int64(OraShardSpex.StorageSizeInGb), 10) + "Gi"), }, @@ -354,9 +395,9 @@ func volumeClaimTemplatesForShard(instance *databasev1alpha1.ShardingDatabase, O return claims } -func BuildServiceDefForShard(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraShardSpex databasev1alpha1.ShardSpec, svctype string) *corev1.Service { - service := &corev1.Service{} - service = &corev1.Service{ +func BuildServiceDefForShard(instance *databasev4.ShardingDatabase, replicaCount int32, OraShardSpex databasev4.ShardSpec, svctype string) *corev1.Service { + //service := &corev1.Service{} + service := &corev1.Service{ ObjectMeta: buildSvcObjectMetaForShard(instance, replicaCount, OraShardSpex, svctype), Spec: corev1.ServiceSpec{}, } @@ -369,7 +410,7 @@ func BuildServiceDefForShard(instance *databasev1alpha1.ShardingDatabase, replic if svctype == "local" { service.Spec.ClusterIP = corev1.ClusterIPNone - service.Spec.Selector = buildLabelsForShard(instance, "sharding") + service.Spec.Selector = getSvcLabelsForShard(replicaCount, OraShardSpex) } // build Service Ports Specs to be exposed. If the PortMappings is not set then default ports will be exposed. 
@@ -378,7 +419,7 @@ func BuildServiceDefForShard(instance *databasev1alpha1.ShardingDatabase, replic } // Function to build Service ObjectMeta -func buildSvcObjectMetaForShard(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraShardSpex databasev1alpha1.ShardSpec, svctype string) metav1.ObjectMeta { +func buildSvcObjectMetaForShard(instance *databasev4.ShardingDatabase, replicaCount int32, OraShardSpex databasev4.ShardSpec, svctype string) metav1.ObjectMeta { // building objectMeta var svcName string @@ -393,17 +434,16 @@ func buildSvcObjectMetaForShard(instance *databasev1alpha1.ShardingDatabase, rep objmeta := metav1.ObjectMeta{ Name: svcName, - Namespace: instance.Spec.Namespace, - Labels: buildLabelsForShard(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), OwnerReferences: getOwnerRef(instance), } return objmeta } -func getSvcLabelsForShard(replicaCount int32, OraShardSpex databasev1alpha1.ShardSpec) map[string]string { +func getSvcLabelsForShard(replicaCount int32, OraShardSpex databasev4.ShardSpec) map[string]string { - var labelStr map[string]string - labelStr = make(map[string]string) + var labelStr map[string]string = make(map[string]string) if replicaCount == -1 { labelStr["statefulset.kubernetes.io/pod-name"] = OraShardSpex.Name + "-0" } else { @@ -415,11 +455,11 @@ func getSvcLabelsForShard(replicaCount int32, OraShardSpex databasev1alpha1.Shar } // ======================== Update Section ======================== -func UpdateProvForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec, kClient client.Client, sfSet *appsv1.StatefulSet, shardPod *corev1.Pod, logger logr.Logger, +func UpdateProvForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec, kClient client.Client, sfSet *appsv1.StatefulSet, shardPod *corev1.Pod, logger logr.Logger, ) (ctrl.Result, error) { var msg string - var size int32 - 
size = 1 + var size int32 = 1 + //size = 1 var isUpdate bool = false var err error var i int @@ -440,7 +480,7 @@ func UpdateProvForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpe oraSpexRes := OraShardSpex.Resources if !reflect.DeepEqual(shardContaineRes, oraSpexRes) { - isUpdate = true + isUpdate = true } } } diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml index 3aadc474..6b331894 100644 --- a/config/certmanager/certificate.yaml +++ b/config/certmanager/certificate.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml index 8dd6746c..0a7c8089 100644 --- a/config/certmanager/kustomization.yaml +++ b/config/certmanager/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # resources: diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml index f8e232fa..ce54850c 100644 --- a/config/certmanager/kustomizeconfig.yaml +++ b/config/certmanager/kustomizeconfig.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# diff --git a/config/crd/bases/database.oracle.com_DbcsSystem.yaml b/config/crd/bases/database.oracle.com_DbcsSystem.yaml new file mode 100644 index 00000000..e933d5a4 --- /dev/null +++ b/config/crd/bases/database.oracle.com_DbcsSystem.yaml @@ -0,0 +1,240 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: dbcssystems.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DbcsSystem + listKind: DbcsSystemList + plural: dbcssystems + singular: dbcssystem + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: DbcsSystem is the Schema for the dbcssystems API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DbcsSystemSpec defines the desired state of DbcsSystem + properties: + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPaswordSecret: + type: string + dbBackupConfig: + description: DB Backup COnfig Network Struct + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPaswordSecret + - hostName + - shape + - sshPublicKeys + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + ociConfigMap: + type: string + ociSecret: + type: string + required: + - ociConfigMap + type: object + status: + description: DbcsSystemStatus defines the observed state of DbcsSystem + properties: + availabilityDomain: + type: string + 
cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbEdition: + type: string + dbInfo: + items: + description: DbcsSystemStatus defines the observed state of DbcsSystem + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_autonomouscontainerdatabases.yaml b/config/crd/bases/database.oracle.com_autonomouscontainerdatabases.yaml new file mode 100644 index 00000000..1e078b63 --- /dev/null +++ b/config/crd/bases/database.oracle.com_autonomouscontainerdatabases.yaml @@ -0,0 +1,159 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: autonomouscontainerdatabases.database.oracle.com +spec: + group: database.oracle.com + 
names: + kind: AutonomousContainerDatabase + listKind: AutonomousContainerDatabaseList + plural: autonomouscontainerdatabases + shortNames: + - acd + - acds + singular: autonomouscontainerdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: DisplayName + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + type: string + autonomousExadataVMClusterOCID: + type: string + compartmentOCID: + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + patchModel: + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS + type: string + type: object + status: + properties: + lifecycleState: + type: string + timeCreated: + type: string + required: + - lifecycleState + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: DisplayName + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + type: string + autonomousExadataVMClusterOCID: + type: string + compartmentOCID: + type: string + displayName: + type: string + 
freeformTags: + additionalProperties: + type: string + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + patchModel: + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS + type: string + type: object + status: + properties: + lifecycleState: + type: string + timeCreated: + type: string + required: + - lifecycleState + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_autonomousdatabasebackups.yaml b/config/crd/bases/database.oracle.com_autonomousdatabasebackups.yaml new file mode 100644 index 00000000..b0d6f8ed --- /dev/null +++ b/config/crd/bases/database.oracle.com_autonomousdatabasebackups.yaml @@ -0,0 +1,201 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: autonomousdatabasebackups.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabaseBackup + listKind: AutonomousDatabaseBackupList + plural: autonomousdatabasebackups + shortNames: + - adbbu + - adbbus + singular: autonomousdatabasebackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.dbDisplayName + name: DB DisplayName + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.timeStarted + name: Started + type: string + - jsonPath: .status.timeEnded + name: Ended + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + autonomousDatabaseBackupOCID: + type: string + displayName: + type: string + isLongTermBackup: + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: 
string + type: object + retentionPeriodInDays: + type: integer + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + type: object + status: + properties: + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + dbDisplayName: + type: string + dbName: + type: string + isAutomatic: + type: boolean + lifecycleState: + type: string + timeEnded: + type: string + timeStarted: + type: string + type: + type: string + required: + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.dbDisplayName + name: DB DisplayName + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.timeStarted + name: Started + type: string + - jsonPath: .status.timeEnded + name: Ended + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + autonomousDatabaseBackupOCID: + type: string + displayName: + type: string + isLongTermBackup: + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + retentionPeriodInDays: + type: integer + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + type: object + status: + properties: + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + dbDisplayName: + type: string + dbName: + type: string + isAutomatic: + type: boolean + lifecycleState: + type: string + timeEnded: + type: string + timeStarted: + type: string + type: + type: 
string + required: + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_autonomousdatabaserestores.yaml b/config/crd/bases/database.oracle.com_autonomousdatabaserestores.yaml new file mode 100644 index 00000000..3bfc5a4e --- /dev/null +++ b/config/crd/bases/database.oracle.com_autonomousdatabaserestores.yaml @@ -0,0 +1,191 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: autonomousdatabaserestores.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabaseRestore + listKind: AutonomousDatabaseRestoreList + plural: autonomousdatabaserestores + shortNames: + - adbr + - adbrs + singular: autonomousdatabaserestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: DbName + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + source: + properties: + k8sADBBackup: + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + type: string + type: object + type: object + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + properties: + dbName: + type: string + displayName: + type: string + status: + type: string + timeAccepted: + 
type: string + timeEnded: + type: string + timeStarted: + type: string + workRequestOCID: + type: string + required: + - dbName + - displayName + - status + - workRequestOCID + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: DbName + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + source: + properties: + k8sADBBackup: + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + type: string + type: object + type: object + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + properties: + dbName: + type: string + displayName: + type: string + status: + type: string + timeAccepted: + type: string + timeEnded: + type: string + timeStarted: + type: string + workRequestOCID: + type: string + required: + - dbName + - displayName + - status + - workRequestOCID + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_autonomousdatabases.yaml b/config/crd/bases/database.oracle.com_autonomousdatabases.yaml index 51b0b6ab..1672ae81 100644 --- a/config/crd/bases/database.oracle.com_autonomousdatabases.yaml +++ b/config/crd/bases/database.oracle.com_autonomousdatabases.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - 
creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomousdatabases.database.oracle.com spec: group: database.oracle.com @@ -20,22 +18,25 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .status.displayName + - jsonPath: .spec.details.displayName name: Display Name type: string + - jsonPath: .spec.details.dbName + name: Db Name + type: string - jsonPath: .status.lifecycleState name: State type: string - - jsonPath: .status.isDedicated + - jsonPath: .spec.details.isDedicated name: Dedicated type: string - - jsonPath: .status.cpuCoreCount + - jsonPath: .spec.details.cpuCoreCount name: OCPUs type: integer - - jsonPath: .status.dataStorageSizeInTBs + - jsonPath: .spec.details.dataStorageSizeInTBs name: Storage (TB) type: integer - - jsonPath: .status.dbWorkload + - jsonPath: .spec.details.dbWorkload name: Workload Type type: string - jsonPath: .status.timeCreated @@ -44,39 +45,67 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabase is the Schema for the autonomousdatabases - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: 'AutonomousDatabaseSpec defines the desired state of AutonomousDatabase - Important: Run "make" to regenerate code after modifying this file' properties: - details: - description: AutonomousDatabaseDetails defines the detail information - of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone + type: string + clone: properties: adminPassword: properties: - k8sSecretName: - type: string - ociSecretOCID: - type: string + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object type: object - autonomousDatabaseOCID: + cloneType: + enum: + - FULL + - METADATA type: string - compartmentOCID: + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU type: string cpuCoreCount: type: integer @@ -87,8 +116,6 @@ spec: dbVersion: type: string dbWorkload: - description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying - type: string' enum: - OLTP - DW @@ -101,38 +128,125 @@ spec: additionalProperties: type: string type: object + isAccessControlEnabled: + type: boolean isAutoScalingEnabled: type: boolean isDedicated: type: boolean - lifecycleState: - description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying - type: string' + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE type: string - nsgOCIDs: + nsgIds: items: type: string type: array - privateEndpoint: - type: string - 
privateEndpointIP: - type: string + ocpuCount: + type: number privateEndpointLabel: type: string - subnetOCID: + subnetId: type: string - wallet: + whitelistedIps: + items: + type: string + type: array + type: object + details: + properties: + adminPassword: properties: - name: - type: string - password: + k8sSecret: properties: - k8sSecretName: + name: type: string - ociSecretOCID: + type: object + ociSecret: + properties: + id: type: string type: object type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + id: + type: string + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array type: object hardLink: default: false @@ -144,42 +258,428 @@ spec: secretName: type: string type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + type: object required: - - details + - action type: object status: - description: AutonomousDatabaseStatus 
defines the observed state of AutonomousDatabase properties: - cpuCoreCount: - type: integer - dataStorageSizeInTBs: - type: integer - dbWorkload: - description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying - type: string' + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: object + type: array + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lifecycleState: type: string - displayName: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' + timeCreated: type: string - isDedicated: + walletExpiringDate: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.details.displayName + name: Display Name + type: string + - jsonPath: .spec.details.dbName + name: Db Name + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .spec.details.isDedicated + name: Dedicated + type: string + - jsonPath: .spec.details.cpuCoreCount + name: OCPUs + type: integer + - 
jsonPath: .spec.details.dataStorageSizeInTBs + name: Storage (TB) + type: integer + - jsonPath: .spec.details.dbWorkload + name: Workload Type + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone type: string + clone: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + cloneType: + enum: + - FULL + - METADATA + type: string + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + details: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + 
type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + id: + type: string + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + type: object + required: + - action + type: object + status: + properties: + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: object + type: array + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + 
observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map lifecycleState: - description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying - type: string' type: string timeCreated: type: string + walletExpiringDate: + type: string type: object type: object served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_cdbs.yaml b/config/crd/bases/database.oracle.com_cdbs.yaml new file mode 100644 index 00000000..924946ee --- /dev/null +++ b/config/crd/bases/database.oracle.com_cdbs.yaml @@ -0,0 +1,492 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: cdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: CDB + listKind: CDBList + plural: cdbs + singular: cdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - 
description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + ordsImage: + type: string + ordsImagePullPolicy: + enum: + - Always + - Never + type: string + ordsImagePullSecret: + type: string + ordsPort: + type: integer + ordsPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - 
key + - secretName + type: object + required: + - secret + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + 
cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + ordsImage: + type: string + ordsImagePullPolicy: + enum: + - Always + - Never + type: string + ordsImagePullSecret: + type: string + ordsPort: + type: integer + ordsPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + 
secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_dataguardbrokers.yaml b/config/crd/bases/database.oracle.com_dataguardbrokers.yaml new file mode 100644 index 00000000..0e27126d --- /dev/null +++ b/config/crd/bases/database.oracle.com_dataguardbrokers.yaml @@ -0,0 +1,210 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: dataguardbrokers.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DataguardBroker + listKind: DataguardBrokerList + plural: dataguardbrokers + singular: dataguardbroker + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + - jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + fastStartFailover: + type: boolean + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + 
primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + properties: + clusterConnectString: + type: string + databasesInDataguardConfig: + additionalProperties: + type: string + type: object + externalConnectString: + type: string + fastStartFailover: + type: string + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + - jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + fastStartFailover: + type: boolean + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + 
setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + properties: + clusterConnectString: + type: string + databasesInDataguardConfig: + additionalProperties: + type: string + type: object + externalConnectString: + type: string + fastStartFailover: + type: string + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_dbcssystems.yaml b/config/crd/bases/database.oracle.com_dbcssystems.yaml new file mode 100644 index 00000000..468d7612 --- /dev/null +++ b/config/crd/bases/database.oracle.com_dbcssystems.yaml @@ -0,0 +1,758 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: dbcssystems.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DbcsSystem + listKind: DbcsSystemList + plural: dbcssystems + singular: dbcssystem + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + 
tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPaswordSecret: + type: string + dbBackupConfig: + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPaswordSecret + - hostName + - shape + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + properties: + 
freeformTags: + additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + 
pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + required: + - freeformTags + - pdbName + - shouldPdbAdminAccountBeLocked + type: object + type: array + type: object + type: array + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPasswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: 
integer + dbAdminPasswordSecret: + type: string + dbBackupConfig: + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPasswordSecret + - hostName + - shape + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - 
pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + type: object + type: array + type: object + type: array + 
recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_lrests.yaml b/config/crd/bases/database.oracle.com_lrests.yaml new file mode 100644 index 00000000..c20356e7 --- /dev/null +++ b/config/crd/bases/database.oracle.com_lrests.yaml @@ -0,0 +1,254 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrests.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LREST + listKind: LRESTList + plural: lrests + singular: lrest + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the LREST + jsonPath: .spec.cdbName + name: CDB NAME + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the LREST Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message if any + jsonPath: .status.msg + name: Message + type: string + - description: string of the tnsalias + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + 
spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + lrestImage: + type: string + lrestImagePullPolicy: + enum: + - Always + - Never + type: string + lrestImagePullSecret: + type: string + lrestPort: + type: integer + lrestPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + nodeSelector: + additionalProperties: + type: string + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + 
properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_lrpdbs.yaml b/config/crd/bases/database.oracle.com_lrpdbs.yaml new file mode 100644 index 00000000..14ad7f29 --- /dev/null +++ b/config/crd/bases/database.oracle.com_lrpdbs.yaml @@ -0,0 +1,369 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrpdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LRPDB + listKind: LRPDBList + plural: lrpdbs + singular: lrpdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the LRPDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: last sqlcode + jsonPath: .status.sqlCode + name: last sqlcode + type: integer + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + 
name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + - Alter + - Noaction + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbPass: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + alterSystem: + type: string + alterSystemParameter: + type: string + alterSystemValue: + type: string + asClone: + type: boolean + assertiveLrpdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + lrpdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + lrpdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + 
required: + - key + - secretName + type: object + required: + - secret + type: object + lrpdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + parameterScope: + type: string + pdbName: + type: string + pdbState: + enum: + - OPEN + - CLOSE + - ALTER + type: string + pdbconfigmap: + type: string + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + - alterSystemParameter + - alterSystemValue + - webServerPwd + type: object + status: + properties: + action: + type: string + alterSystem: + type: string + bitstat: + type: integer + bitstatstr: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: 
string + phase: + type: string + sqlCode: + type: integer + status: + type: boolean + totalSize: + type: string + required: + - phase + - sqlCode + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_oraclerestdataservices.yaml b/config/crd/bases/database.oracle.com_oraclerestdataservices.yaml new file mode 100644 index 00000000..fe93a531 --- /dev/null +++ b/config/crd/bases/database.oracle.com_oraclerestdataservices.yaml @@ -0,0 +1,362 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: oraclerestdataservices.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OracleRestDataService + listKind: OracleRestDataServiceList + plural: oraclerestdataservices + singular: oraclerestdataservice + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + - jsonPath: .status.mongoDbApiAccessUrl + name: MongoDbApi Access URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: + type: boolean + 
nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + readinessCheckPeriod: + type: integer + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database 
Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + - jsonPath: .status.mongoDbApiAccessUrl + name: MongoDbApi Access URL + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + readinessCheckPeriod: + type: integer + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: 
+ type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_ordssrvs.yaml b/config/crd/bases/database.oracle.com_ordssrvs.yaml new file mode 100644 index 00000000..9c4ab88f --- /dev/null +++ b/config/crd/bases/database.oracle.com_ordssrvs.yaml @@ -0,0 +1,488 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: ordssrvs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OrdsSrvs + listKind: OrdsSrvsList + plural: ordssrvs + singular: ordssrvs + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: status + type: string + - jsonPath: .status.workloadType + name: workloadType + type: string + - jsonPath: .status.ordsVersion + name: ordsVersion + type: string + - jsonPath: .status.httpPort + name: httpPort + type: integer + - jsonPath: .status.httpsPort + name: httpsPort + type: integer + - jsonPath: .status.mongoPort + name: MongoPort + type: integer + - jsonPath: .status.restartRequired + name: restartRequired + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.ordsInstalled + name: OrdsInstalled + type: boolean + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + encPrivKey: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + 
type: object + forceRestart: + type: boolean + globalSettings: + properties: + cache.metadata.enabled: + type: boolean + cache.metadata.graphql.expireAfterAccess: + format: int64 + type: integer + cache.metadata.graphql.expireAfterWrite: + format: int64 + type: integer + cache.metadata.jwks.enabled: + type: boolean + cache.metadata.jwks.expireAfterAccess: + format: int64 + type: integer + cache.metadata.jwks.expireAfterWrite: + format: int64 + type: integer + cache.metadata.jwks.initialCapacity: + format: int32 + type: integer + cache.metadata.jwks.maximumSize: + format: int32 + type: integer + cache.metadata.timeout: + format: int64 + type: integer + certSecret: + properties: + cert: + type: string + key: + type: string + secretName: + type: string + required: + - cert + - key + - secretName + type: object + database.api.enabled: + type: boolean + database.api.management.services.disabled: + type: boolean + db.invalidPoolTimeout: + format: int64 + type: integer + debug.printDebugToScreen: + type: boolean + enable.mongo.access.log: + default: false + type: boolean + enable.standalone.access.log: + default: false + type: boolean + error.responseFormat: + type: string + feature.grahpql.max.nesting.depth: + format: int32 + type: integer + icap.port: + format: int32 + type: integer + icap.secure.port: + format: int32 + type: integer + icap.server: + type: string + log.procedure: + type: boolean + mongo.enabled: + type: boolean + mongo.idle.timeout: + format: int64 + type: integer + mongo.op.timeout: + format: int64 + type: integer + mongo.port: + default: 27017 + format: int32 + type: integer + request.traceHeaderName: + type: string + security.credentials.attempts: + format: int32 + type: integer + security.credentials.lock.time: + format: int64 + type: integer + security.disableDefaultExclusionList: + type: boolean + security.exclusionList: + type: string + security.externalSessionTrustedOrigins: + type: string + security.forceHTTPS: + type: boolean + 
security.httpsHeaderCheck: + type: string + security.inclusionList: + type: string + security.maxEntries: + format: int32 + type: integer + security.verifySSL: + type: boolean + standalone.context.path: + default: /ords + type: string + standalone.http.port: + default: 8080 + format: int32 + type: integer + standalone.https.host: + type: string + standalone.https.port: + default: 8443 + format: int32 + type: integer + standalone.stop.timeout: + format: int64 + type: integer + type: object + image: + type: string + imagePullPolicy: + default: IfNotPresent + enum: + - IfNotPresent + - Always + - Never + type: string + imagePullSecrets: + type: string + poolSettings: + items: + properties: + apex.security.administrator.roles: + type: string + apex.security.user.roles: + type: string + autoUpgradeAPEX: + default: false + type: boolean + autoUpgradeORDS: + default: false + type: boolean + db.adminUser: + type: string + db.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.cdb.adminUser: + type: string + db.cdb.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.connectionType: + enum: + - basic + - tns + - customurl + type: string + db.credentialsSource: + enum: + - pool + - request + type: string + db.customURL: + type: string + db.hostname: + type: string + db.poolDestroyTimeout: + format: int64 + type: integer + db.port: + format: int32 + type: integer + db.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.servicename: + type: string + db.sid: + type: string + db.tnsAliasName: + type: string + db.username: + default: ORDS_PUBLIC_USER + type: string + db.wallet.zip.service: + type: string + dbWalletSecret: + properties: + secretName: + type: string + walletName: 
+ type: string + required: + - secretName + - walletName + type: object + debug.trackResources: + type: boolean + feature.openservicebroker.exclude: + type: boolean + feature.sdw: + type: boolean + http.cookie.filter: + type: string + jdbc.DriverType: + enum: + - thin + - oci8 + type: string + jdbc.InactivityTimeout: + format: int32 + type: integer + jdbc.InitialLimit: + format: int32 + type: integer + jdbc.MaxConnectionReuseCount: + format: int32 + type: integer + jdbc.MaxConnectionReuseTime: + format: int32 + type: integer + jdbc.MaxLimit: + format: int32 + type: integer + jdbc.MaxStatementsLimit: + format: int32 + type: integer + jdbc.MinLimit: + format: int32 + type: integer + jdbc.SecondsToTrustIdleConnection: + format: int32 + type: integer + jdbc.auth.admin.role: + type: string + jdbc.auth.enabled: + type: boolean + jdbc.cleanup.mode: + type: string + jdbc.statementTimeout: + format: int32 + type: integer + misc.defaultPage: + type: string + misc.pagination.maxRows: + format: int32 + type: integer + owa.trace.sql: + type: boolean + plsql.gateway.mode: + enum: + - disabled + - direct + - proxied + type: string + poolName: + type: string + procedure.preProcess: + type: string + procedure.rest.preHook: + type: string + procedurePostProcess: + type: string + restEnabledSql.active: + type: boolean + security.jwks.connection.timeout: + format: int64 + type: integer + security.jwks.read.timeout: + format: int64 + type: integer + security.jwks.refresh.interval: + format: int64 + type: integer + security.jwks.size: + format: int32 + type: integer + security.jwt.allowed.age: + format: int64 + type: integer + security.jwt.allowed.skew: + format: int64 + type: integer + security.jwt.profile.enabled: + type: boolean + security.requestAuthenticationFunction: + type: string + security.requestValidationFunction: + default: ords_util.authorize_plsql_gateway + type: string + security.validationFunctionType: + enum: + - plsql + - javascript + type: string + soda.defaultLimit: 
+ type: string + soda.maxLimit: + type: string + tnsAdminSecret: + properties: + secretName: + type: string + required: + - secretName + type: object + required: + - db.secret + - poolName + type: object + type: array + replicas: + default: 1 + format: int32 + minimum: 1 + type: integer + workloadType: + default: Deployment + enum: + - Deployment + - StatefulSet + - DaemonSet + type: string + required: + - globalSettings + - image + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + httpPort: + format: int32 + type: integer + httpsPort: + format: int32 + type: integer + mongoPort: + format: int32 + type: integer + ordsInstalled: + type: boolean + ordsVersion: + type: string + restartRequired: + type: boolean + status: + type: string + workloadType: + type: string + required: + - restartRequired + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_pdbs.yaml b/config/crd/bases/database.oracle.com_pdbs.yaml new file mode 100644 index 00000000..b2f37ac9 --- /dev/null +++ b/config/crd/bases/database.oracle.com_pdbs.yaml @@ -0,0 +1,634 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: pdbs.database.oracle.com +spec: + group: database.oracle.com + 
names: + kind: PDB + listKind: PDBList + plural: pdbs + singular: pdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + type: boolean + assertivePdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + type: string + pdbOrdsPrvKey: + properties: + secret: + 
properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbState: + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - 
secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + type: object + status: + properties: + action: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + status: + type: boolean + totalSize: + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + type: boolean + assertivePdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbResName: + 
type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + type: string + pdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbState: + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + 
totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + type: object + status: + properties: + action: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + status: + type: boolean + totalSize: + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_shardingdatabases.yaml b/config/crd/bases/database.oracle.com_shardingdatabases.yaml index 17dd4e23..90c6dd53 100644 --- a/config/crd/bases/database.oracle.com_shardingdatabases.yaml +++ b/config/crd/bases/database.oracle.com_shardingdatabases.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: shardingdatabases.database.oracle.com spec: group: database.oracle.com @@ -16,34 +14,36 @@ spec: singular: shardingdatabase scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.gsm.state + name: Gsm State + type: string + - jsonPath: .status.gsm.services + name: Services + type: string + - jsonPath: .status.gsm.shards + name: shards + priority: 1 + type: string + name: v1alpha1 schema: openAPIV3Schema: - description: ShardingDatabase is the Schema for the shardingdatabases API properties: apiVersion: - description: 'APIVersion 
defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: ShardingDatabaseSpec defines the desired state of ShardingDatabase properties: + InvitedNodeSubnet: + type: string catalog: items: - description: CatalogSpec defines the desired state of CatalogSpec properties: envVars: items: - description: EnvironmentVariable represents a named variable - accessible for containers. properties: name: type: string @@ -55,11 +55,9 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image type: string isDelete: - type: boolean + type: string label: type: string name: @@ -79,9 +77,21 @@ spec: pvcName: type: string resources: - description: ResourceRequirements describes the compute resource - requirements. properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -89,8 +99,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -99,11 +107,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object storageSizeInGb: @@ -113,18 +116,45 @@ spec: - name type: object type: array + dbEdition: + type: string dbImage: type: string dbImagePullSecret: type: string + dbSecret: + properties: + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string + required: + - name + - pwdFileName + type: object + fssStorageClass: + type: string gsm: items: - description: GsmSpec defines the desired state of GsmSpec properties: + directorName: + type: string envVars: items: - description: EnvironmentVariable represents a named variable - accessible for containers. 
properties: name: type: string @@ -136,11 +166,9 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image type: string isDelete: - type: boolean + type: string label: type: string name: @@ -149,19 +177,34 @@ spec: additionalProperties: type: string type: object + pvAnnotations: + additionalProperties: + type: string + type: object pvMatchLabels: additionalProperties: type: string type: object pvcName: type: string - replicas: - format: int32 - type: integer + region: + type: string resources: - description: ResourceRequirements describes the compute resource - requirements. properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -169,8 +212,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -179,11 +220,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. 
More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object storageSizeInGb: @@ -193,10 +229,107 @@ spec: - name type: object type: array + gsmDevMode: + type: string gsmImage: type: string gsmImagePullSecret: type: string + gsmService: + items: + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string + pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: string + required: + - name + type: object + type: array + gsmShardSpace: + items: + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: + type: string isClone: type: boolean isDataGuard: @@ -205,24 +338,567 @@ spec: type: boolean isDeleteOraPvc: type: boolean + isDownloadScripts: + type: boolean isExternalSvc: type: boolean - namespace: + isTdeWallet: + type: string + liveinessCheckPeriod: + type: integer + portMappings: + items: + properties: + port: + format: int32 + type: integer + protocol: + type: string + targetPort: + 
format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + items: + properties: + deployAs: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string 
+ tdeWalletPvcMountLocation: + type: string + topicId: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + properties: + catalogs: + additionalProperties: + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: + properties: + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string + type: object + state: + type: string + type: object + shards: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.gsm.state + name: Gsm State + type: string + - jsonPath: .status.gsm.services + name: Services + type: string + - jsonPath: .status.gsm.shards + name: shards + priority: 1 + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + InvitedNodeSubnet: + type: string + catalog: + items: + properties: + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: 
object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + dbEdition: + type: string + dbImage: + type: string + dbImagePullSecret: + type: string + dbSecret: + properties: + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string + required: + - name + - pwdFileName + type: object + fssStorageClass: + type: string + gsm: + items: + properties: + directorName: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: 
+ type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + region: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + gsmDevMode: + type: string + gsmImage: + type: string + gsmImagePullSecret: type: string - nsConfigMap: + gsmService: + items: + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string + pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + 
sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: string + required: + - name + type: object + type: array + gsmShardSpace: + items: + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: type: string - nsSecret: + isClone: + type: boolean + isDataGuard: + type: boolean + isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: type: string + liveinessCheckPeriod: + type: integer portMappings: items: - description: PortMapping is a specification of port mapping for - an application deployment. properties: port: format: int32 type: integer protocol: - default: TCP type: string targetPort: format: int32 @@ -233,21 +909,19 @@ spec: - targetPort type: object type: array - scriptsLocation: + readinessCheckPeriod: + type: integer + replicationType: type: string - secret: + scriptsLocation: type: string shard: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file' items: - description: ShardSpec is a specification of Shards for an application - deployment. properties: + deployAs: + type: string envVars: items: - description: EnvironmentVariable represents a named variable - accessible for containers. 
properties: name: type: string @@ -259,11 +933,14 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image type: string isDelete: - type: boolean + enum: + - enable + - disable + - failed + - force + type: string label: type: string name: @@ -283,9 +960,21 @@ spec: pvcName: type: string resources: - description: ResourceRequirements describes the compute resource - requirements. properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -293,8 +982,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -303,13 +990,14 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. 
More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string storageSizeInGb: format: int32 type: integer @@ -317,21 +1005,34 @@ spec: - name type: object type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string stagePvcName: type: string storageClass: type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + topicId: + type: string required: - catalog - dbImage - gsm - gsmImage - - secret - shard type: object status: - description: To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 - ShardingDatabaseStatus defines the observed state of ShardingDatabase properties: catalogs: additionalProperties: @@ -339,62 +1040,29 @@ spec: type: object conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. 
format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -438,9 +1106,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml b/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml index d7abe5e0..8357f2c5 100644 --- a/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml +++ b/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: singleinstancedatabases.database.oracle.com spec: group: database.oracle.com @@ -20,12 +18,15 @@ spec: - jsonPath: .status.edition name: Edition type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: string - jsonPath: .status.status name: Status type: string - jsonPath: .status.role name: Role - priority: 1 type: string - jsonPath: .status.releaseUpdate name: Version @@ -37,63 +38,70 @@ spec: name: Pdb Connect Str priority: 1 type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string - jsonPath: .status.oemExpressUrl name: Oem Express Url type: string name: v1alpha1 schema: openAPIV3Schema: - description: SingleInstanceDatabase is the Schema for the singleinstancedatabases - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase properties: adminPassword: - description: SingleInsatnceAdminPassword defines the secret containing - Admin Password mapped to secretKey for Database properties: keepSecret: type: boolean secretKey: + default: oracle_pwd type: string secretName: type: string required: - - secretKey - secretName type: object archiveLog: type: boolean charset: type: string - cloneFrom: + convertToSnapshotStandby: + type: boolean + createAs: + enum: + - primary + - standby + - clone + - truecache type: string edition: enum: - standard - enterprise + - express + - free type: string + enableTCPS: + type: boolean flashBack: type: boolean forceLog: type: boolean image: - description: SingleInstanceDatabaseImage defines the Image source - and pullSecrets for POD properties: + prebuiltDB: + type: boolean pullFrom: type: string pullSecrets: @@ -104,7 +112,6 @@ spec: - pullFrom type: object initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters properties: cpuCount: type: integer @@ -115,8 +122,8 @@ spec: sgaTarget: type: integer type: object - installApex: - type: boolean + listenerPort: + type: integer loadBalancer: type: boolean nodeSelector: @@ -126,111 +133,112 @@ spec: pdbName: type: string persistence: - description: SingleInstanceDatabasePersistence defines the storage - size and 
class for PVC properties: accessMode: enum: - ReadWriteOnce - ReadWriteMany type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean size: type: string storageClass: type: string - required: - - accessMode - - size - - storageClass + volumeClaimAnnotation: + type: string type: object + primaryDatabaseRef: + type: string readinessCheckPeriod: type: integer replicas: - minimum: 1 type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object sid: - description: SID can only have a-z , A-Z, 0-9 . It cant have any special - characters + maxLength: 12 pattern: ^[a-zA-Z0-9]+$ type: string + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + trueCacheServices: + items: + type: string + type: array required: - - adminPassword - image - - persistence - - replicas type: object status: - description: SingleInstanceDatabaseStatus defines the observed state of - SingleInstanceDatabase properties: apexInstalled: type: boolean archiveLog: type: string + certCreationTimestamp: + type: string + certRenewInterval: + type: string charset: type: string - cloneFrom: + clientWalletLoc: type: string clusterConnectString: type: string conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -247,9 +255,17 @@ spec: x-kubernetes-list-type: map connectString: type: string + convertToSnapshotStandby: + type: boolean + createdAs: + type: string datafilesCreated: + default: "false" type: string datafilesPatched: + default: "false" + type: string + dgBroker: type: string edition: type: string @@ -258,7 +274,6 @@ spec: forceLog: type: string initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters properties: cpuCount: type: integer @@ -273,6 +288,9 @@ spec: type: integer initSgaSize: type: integer + isTcpsEnabled: + default: false + type: boolean nodes: items: type: string @@ -286,23 +304,376 @@ spec: pdbName: type: string persistence: - description: SingleInstanceDatabasePersistence defines the storage - size and class for PVC properties: accessMode: enum: - ReadWriteOnce - ReadWriteMany type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean size: type: string storageClass: type: string + volumeClaimAnnotation: + type: string + type: object + prebuiltDB: + type: boolean + primaryDatabase: + type: string + releaseUpdate: + type: string + replicas: + type: integer + role: + type: string + sid: + type: string + standbyDatabases: + additionalProperties: + type: string + type: object + status: + type: string + tcpsConnectString: + type: string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: "" + type: string + required: + - isTcpsEnabled + - persistence + - tcpsTlsSecret + type: object + type: object + served: true + storage: 
false + subresources: + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + - additionalPrinterColumns: + - jsonPath: .status.edition + name: Edition + type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.role + name: Role + type: string + - jsonPath: .status.releaseUpdate + name: Version + type: string + - jsonPath: .status.connectString + name: Connect Str + type: string + - jsonPath: .status.pdbConnectString + name: Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.oemExpressUrl + name: Oem Express Url + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string required: - - accessMode - - size - - storageClass + - secretName type: object + archiveLog: + type: boolean + charset: + type: string + convertToSnapshotStandby: + type: boolean + createAs: + enum: + - primary + - standby + - clone + - truecache + type: string + edition: + enum: + - standard + - enterprise + - express + - free + type: string + enableTCPS: + type: boolean + flashBack: + type: boolean + forceLog: + type: boolean + image: + properties: + prebuiltDB: + type: boolean + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + listenerPort: + type: integer + loadBalancer: + 
type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + primaryDatabaseRef: + type: string + readinessCheckPeriod: + type: integer + replicas: + type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + sid: + maxLength: 12 + pattern: ^[a-zA-Z0-9]+$ + type: string + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + trueCacheServices: + items: + type: string + type: array + required: + - image + type: object + status: + properties: + apexInstalled: + type: boolean + archiveLog: + type: string + certCreationTimestamp: + type: string + certRenewInterval: + type: string + charset: + type: string + clientWalletLoc: + type: string + clusterConnectString: + type: string + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - 
reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectString: + type: string + convertToSnapshotStandby: + type: boolean + createdAs: + type: string + datafilesCreated: + default: "false" + type: string + datafilesPatched: + default: "false" + type: string + dgBroker: + type: string + edition: + type: string + flashBack: + type: string + forceLog: + type: string + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + initPgaSize: + type: integer + initSgaSize: + type: integer + isTcpsEnabled: + default: false + type: boolean + nodes: + items: + type: string + type: array + oemExpressUrl: + type: string + ordsReference: + type: string + pdbConnectString: + type: string + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + prebuiltDB: + type: boolean + primaryDatabase: + type: string releaseUpdate: type: string replicas: @@ -317,9 +688,17 @@ spec: type: object status: type: string + tcpsConnectString: + type: string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: "" + type: string required: + - isTcpsEnabled - persistence - - replicas + - tcpsTlsSecret type: object type: object served: true @@ -329,9 +708,3 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/observability.oracle.com_databaseobservers.yaml b/config/crd/bases/observability.oracle.com_databaseobservers.yaml new file mode 100644 index 
00000000..298f9d4e --- /dev/null +++ b/config/crd/bases/observability.oracle.com_databaseobservers.yaml @@ -0,0 +1,6988 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: databaseobservers.observability.oracle.com +spec: + group: observability.oracle.com + names: + kind: DatabaseObserver + listKind: DatabaseObserverList + plural: databaseobservers + shortNames: + - dbobserver + - dbobservers + singular: databaseobserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + 
properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - 
type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: 
boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + 
x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - 
KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + 
labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string 
+ fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: 
+ - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: 
object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: 
object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + 
storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + 
properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + 
type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + 
initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: 
integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: 
string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + 
properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + 
type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + 
ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: 
object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: 
boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: 
object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + 
type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + 
type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: 
+ type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + 
properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + 
x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + 
properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + 
additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + 
properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: 
^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + 
type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + 
properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + 
properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: 
object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - 
lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + 
trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + 
x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: 
object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + 
type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + 
type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + 
type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + 
properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + 
imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: 
integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: 
true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + 
readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + 
volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 6b3d488e..726521b0 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # This kustomization.yaml is not intended to be run by itself, @@ -7,25 +7,63 @@ # It should be run by config/default resources: - bases/database.oracle.com_autonomousdatabases.yaml +- bases/database.oracle.com_autonomousdatabasebackups.yaml +- bases/database.oracle.com_autonomousdatabaserestores.yaml - bases/database.oracle.com_singleinstancedatabases.yaml - bases/database.oracle.com_shardingdatabases.yaml +- bases/database.oracle.com_pdbs.yaml +- bases/database.oracle.com_cdbs.yaml +- bases/database.oracle.com_oraclerestdataservices.yaml +- bases/database.oracle.com_autonomouscontainerdatabases.yaml +- bases/database.oracle.com_dbcssystems.yaml +- bases/database.oracle.com_dataguardbrokers.yaml +- bases/observability.oracle.com_databaseobservers.yaml +- bases/database.oracle.com_lrests.yaml +- bases/database.oracle.com_lrpdbs.yaml +- bases/database.oracle.com_ordssrvs.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
# patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_provshards.yaml -#- patches/webhook_in_autonomousdatabases.yaml #- patches/webhook_in_singleinstancedatabases.yaml #- patches/webhook_in_shardingdatabases.yaml +#- patches/webhook_in_pdbs.yaml +#- patches/webhook_in_cdbs.yaml +#- patches/webhook_in_oraclerestdataservices.yaml +#- patches/webhook_in_dbcssystems.yaml +#- patches/webhook_in_dataguardbrokers.yaml +#- patches/webhook_in_databaseobservers.yaml +- patches/webhook_in_autonomousdatabases.yaml +- patches/webhook_in_autonomousdatabasebackups.yaml +- patches/webhook_in_autonomousdatabaserestores.yaml +- patches/webhook_in_autonomouscontainerdatabases.yaml +#- patches/webhook_in_lrests.yaml +#- patches/webhook_in_lrpdbs.yaml +#- patches/webhook_in_ordssrvs.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_provshards.yaml -#- patches/cainjection_in_autonomousdatabases.yaml - patches/cainjection_in_singleinstancedatabases.yaml -#- patches/cainjection_in_shardingdatabases.yaml +- patches/cainjection_in_shardingdatabases.yaml +- patches/cainjection_in_pdbs.yaml +- patches/cainjection_in_cdbs.yaml +#- patches/cainjection_in_oraclerestdataservices.yaml +#- patches/cainjection_in_autonomouscontainerdatabases.yaml +- patches/cainjection_in_dbcssystems.yaml +#- patches/cainjection_in_dataguardbrokers.yaml +#- patches/cainjection_in_databaseobservers.yaml +- patches/cainjection_in_autonomousdatabases.yaml +- patches/cainjection_in_autonomousdatabasebackups.yaml +- patches/cainjection_in_autonomousdatabaserestores.yaml +- patches/cainjection_in_autonomouscontainerdatabases.yaml +#- patches/cainjection_in_lrests.yaml +#- patches/cainjection_in_lrpdbs.yaml +#- patches/cainjection_in_ordssrvs.yaml +#- patches/cainjection_in_singleinstancedatabases.yaml # 
+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index fb9995dc..344a1576 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # This file is for teaching kustomize how to substitute name and namespace reference in CRD diff --git a/config/crd/patches/cainjection_in_autonomouscontainerdatabases.yaml b/config/crd/patches/cainjection_in_autonomouscontainerdatabases.yaml new file mode 100644 index 00000000..734407bc --- /dev/null +++ b/config/crd/patches/cainjection_in_autonomouscontainerdatabases.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: autonomouscontainerdatabases.database.oracle.com diff --git a/config/crd/patches/cainjection_in_autonomousdatabasebackups.yaml b/config/crd/patches/cainjection_in_autonomousdatabasebackups.yaml new file mode 100644 index 00000000..9468569d --- /dev/null +++ b/config/crd/patches/cainjection_in_autonomousdatabasebackups.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: autonomousdatabasebackups.database.oracle.com diff --git a/config/crd/patches/cainjection_in_autonomousdatabaserestores.yaml b/config/crd/patches/cainjection_in_autonomousdatabaserestores.yaml new file mode 100644 index 00000000..cfc941f8 --- /dev/null +++ b/config/crd/patches/cainjection_in_autonomousdatabaserestores.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: autonomousdatabaserestores.database.oracle.com diff --git a/config/crd/patches/cainjection_in_autonomousdatabases.yaml b/config/crd/patches/cainjection_in_autonomousdatabases.yaml index 072e3f9e..05842d0b 100644 --- a/config/crd/patches/cainjection_in_autonomousdatabases.yaml +++ b/config/crd/patches/cainjection_in_autonomousdatabases.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # The following patch adds a directive for certmanager to inject CA into the CRD diff --git a/config/crd/patches/cainjection_in_cdbs.yaml b/config/crd/patches/cainjection_in_cdbs.yaml new file mode 100644 index 00000000..8cb50343 --- /dev/null +++ b/config/crd/patches/cainjection_in_cdbs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: cdbs.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_dataguardbrokers.yaml b/config/crd/patches/cainjection_in_database_dataguardbrokers.yaml new file mode 100644 index 00000000..6409f54c --- /dev/null +++ b/config/crd/patches/cainjection_in_database_dataguardbrokers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: dataguardbrokers.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_lrests.yaml b/config/crd/patches/cainjection_in_database_lrests.yaml new file mode 100644 index 00000000..22f4b410 --- /dev/null +++ b/config/crd/patches/cainjection_in_database_lrests.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: lrests.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_lrpdbs.yaml b/config/crd/patches/cainjection_in_database_lrpdbs.yaml new file mode 100644 index 00000000..f6f21f4c --- /dev/null +++ b/config/crd/patches/cainjection_in_database_lrpdbs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: lrpdbs.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_oraclerestdataservices.yaml b/config/crd/patches/cainjection_in_database_oraclerestdataservices.yaml new file mode 100644 index 00000000..d2b5d4ee --- /dev/null +++ b/config/crd/patches/cainjection_in_database_oraclerestdataservices.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: oraclerestdataservices.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_ordssrvs.yaml b/config/crd/patches/cainjection_in_database_ordssrvs.yaml new file mode 100644 index 00000000..d2bfc8bf --- /dev/null +++ b/config/crd/patches/cainjection_in_database_ordssrvs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ordssrvs.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_singleinstancedatabases.yaml b/config/crd/patches/cainjection_in_database_singleinstancedatabases.yaml new file mode 100644 index 00000000..b87b9351 --- /dev/null +++ b/config/crd/patches/cainjection_in_database_singleinstancedatabases.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: singleinstancedatabases.database.oracle.com diff --git a/config/crd/patches/cainjection_in_dataguardbrokers.yaml b/config/crd/patches/cainjection_in_dataguardbrokers.yaml new file mode 100644 index 00000000..6409f54c --- /dev/null +++ b/config/crd/patches/cainjection_in_dataguardbrokers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: dataguardbrokers.database.oracle.com diff --git a/config/crd/patches/cainjection_in_dbcssystems.yaml b/config/crd/patches/cainjection_in_dbcssystems.yaml new file mode 100644 index 00000000..1c14e1fd --- /dev/null +++ b/config/crd/patches/cainjection_in_dbcssystems.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: dbcssystems.database.oracle.com diff --git a/config/crd/patches/cainjection_in_observability_databaseobservers.yaml b/config/crd/patches/cainjection_in_observability_databaseobservers.yaml new file mode 100644 index 00000000..bef0b6c0 --- /dev/null +++ b/config/crd/patches/cainjection_in_observability_databaseobservers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: databaseobservers.observability.oracle.com diff --git a/config/crd/patches/cainjection_in_oraclerestdataservices.yaml b/config/crd/patches/cainjection_in_oraclerestdataservices.yaml new file mode 100644 index 00000000..d2b5d4ee --- /dev/null +++ b/config/crd/patches/cainjection_in_oraclerestdataservices.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: oraclerestdataservices.database.oracle.com diff --git a/config/crd/patches/cainjection_in_pdbs.yaml b/config/crd/patches/cainjection_in_pdbs.yaml new file mode 100644 index 00000000..8c41010a --- /dev/null +++ b/config/crd/patches/cainjection_in_pdbs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: pdbs.database.oracle.com diff --git a/config/crd/patches/cainjection_in_shardingdatabases.yaml b/config/crd/patches/cainjection_in_shardingdatabases.yaml index 6ef22218..45d35376 100644 --- a/config/crd/patches/cainjection_in_shardingdatabases.yaml +++ b/config/crd/patches/cainjection_in_shardingdatabases.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# # The following patch adds a directive for certmanager to inject CA into the CRD diff --git a/config/crd/patches/cainjection_in_singleinstancedatabases.yaml b/config/crd/patches/cainjection_in_singleinstancedatabases.yaml index 4e454c1a..11114339 100644 --- a/config/crd/patches/cainjection_in_singleinstancedatabases.yaml +++ b/config/crd/patches/cainjection_in_singleinstancedatabases.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # The following patch adds a directive for certmanager to inject CA into the CRD diff --git a/config/crd/patches/cainjenction_in_databaseobservers.yaml b/config/crd/patches/cainjenction_in_databaseobservers.yaml new file mode 100644 index 00000000..278ea7fa --- /dev/null +++ b/config/crd/patches/cainjenction_in_databaseobservers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: databaseobservers.observability.oracle.com \ No newline at end of file diff --git a/config/crd/patches/webhook_in_autonomouscontainerdatabases.yaml b/config/crd/patches/webhook_in_autonomouscontainerdatabases.yaml new file mode 100644 index 00000000..6ef8f0a6 --- /dev/null +++ b/config/crd/patches/webhook_in_autonomouscontainerdatabases.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: autonomouscontainerdatabases.database.oracle.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 diff --git a/config/crd/patches/webhook_in_autonomousdatabasebackups.yaml b/config/crd/patches/webhook_in_autonomousdatabasebackups.yaml new file mode 100644 index 00000000..ee363f8f --- /dev/null +++ b/config/crd/patches/webhook_in_autonomousdatabasebackups.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: autonomousdatabasebackups.database.oracle.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 \ No newline at end of file diff --git a/config/crd/patches/webhook_in_autonomousdatabaserestores.yaml b/config/crd/patches/webhook_in_autonomousdatabaserestores.yaml new file mode 100644 index 00000000..33329655 --- /dev/null +++ b/config/crd/patches/webhook_in_autonomousdatabaserestores.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: autonomousdatabaserestores.database.oracle.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 \ No newline at end of file diff --git a/config/crd/patches/webhook_in_autonomousdatabases.yaml b/config/crd/patches/webhook_in_autonomousdatabases.yaml index 55540503..c7ec554f 100644 --- a/config/crd/patches/webhook_in_autonomousdatabases.yaml +++ b/config/crd/patches/webhook_in_autonomousdatabases.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # The following patch enables conversion webhook for CRD @@ -11,11 +11,13 @@ metadata: spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 diff --git a/config/crd/patches/webhook_in_cdbs.yaml b/config/crd/patches/webhook_in_cdbs.yaml new file mode 100644 index 00000000..9283c020 --- /dev/null +++ b/config/crd/patches/webhook_in_cdbs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cdbs.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_databaseobservers.yaml b/config/crd/patches/webhook_in_databaseobservers.yaml new file mode 100644 index 00000000..e61411df --- /dev/null +++ b/config/crd/patches/webhook_in_databaseobservers.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: databaseobservers.observability.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert \ No newline at end of file diff --git a/config/crd/patches/webhook_in_dataguardbrokers.yaml b/config/crd/patches/webhook_in_dataguardbrokers.yaml new file mode 100644 index 00000000..10f62234 --- /dev/null +++ b/config/crd/patches/webhook_in_dataguardbrokers.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: dataguardbrokers.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_dbcssystems.yaml b/config/crd/patches/webhook_in_dbcssystems.yaml new file mode 100644 index 00000000..69e578a3 --- /dev/null +++ b/config/crd/patches/webhook_in_dbcssystems.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: dbcssystems.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_lrests.yaml b/config/crd/patches/webhook_in_lrests.yaml new file mode 100644 index 00000000..01afd4b5 --- /dev/null +++ b/config/crd/patches/webhook_in_lrests.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: lrests.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_lrpdbs.yaml b/config/crd/patches/webhook_in_lrpdbs.yaml new file mode 100644 index 00000000..4120e72f --- /dev/null +++ b/config/crd/patches/webhook_in_lrpdbs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: lrpdbs.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_oraclerestdataservices.yaml b/config/crd/patches/webhook_in_oraclerestdataservices.yaml new file mode 100644 index 00000000..c9398c23 --- /dev/null +++ b/config/crd/patches/webhook_in_oraclerestdataservices.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: oraclerestdataservices.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_ordssrvs.yaml b/config/crd/patches/webhook_in_ordssrvs.yaml new file mode 100644 index 00000000..0c3d7637 --- /dev/null +++ b/config/crd/patches/webhook_in_ordssrvs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ordssrvs.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_pdbs.yaml b/config/crd/patches/webhook_in_pdbs.yaml new file mode 100644 index 00000000..2c41e439 --- /dev/null +++ b/config/crd/patches/webhook_in_pdbs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: pdbs.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_shardingdatabases.yaml b/config/crd/patches/webhook_in_shardingdatabases.yaml index fccda7d0..b006a011 100644 --- a/config/crd/patches/webhook_in_shardingdatabases.yaml +++ b/config/crd/patches/webhook_in_shardingdatabases.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # The following patch enables conversion webhook for CRD diff --git a/config/crd/patches/webhook_in_singleinstancedatabases.yaml b/config/crd/patches/webhook_in_singleinstancedatabases.yaml index 66687d8e..aecc7ba9 100644 --- a/config/crd/patches/webhook_in_singleinstancedatabases.yaml +++ b/config/crd/patches/webhook_in_singleinstancedatabases.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# # The following patch enables conversion webhook for CRD diff --git a/config/database.oracle.com_DbcsSystem.yaml b/config/database.oracle.com_DbcsSystem.yaml new file mode 100644 index 00000000..c342c363 --- /dev/null +++ b/config/database.oracle.com_DbcsSystem.yaml @@ -0,0 +1,433 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + creationTimestamp: null + name: DbcsSystem.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DbcsSystem + listKind: DbcsSystemList + plural: DbcsSystem + singular: dbcssystem + scope: Namespaced + versions: + - name: v4 + schema: + openAPIV3Schema: + description: DbcsSystem is the Schema for the dbcssystems API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DbcsSystemSpec defines the desired state of DbcsSystem + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + description: DbCloneConfig defines the configuration for the database + clone + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPaswordSecret: + type: string + dbBackupConfig: + description: DB Backup Config Network Struct + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + 
type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPaswordSecret + - hostName + - shape + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + description: PDBConfig defines details of PDB struct for DBCS systems + properties: + freeformTags: + additionalProperties: + type: string + description: '// Free-form tags for this resource. Each tag + is a simple key-value pair with no predefined name, type, + or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). + // Example: `{"Department": "Finance"}`' + type: object + isDelete: + description: To specify whether to delete the PDB + type: boolean + pdbAdminPassword: + description: // A strong password for PDB Admin. The password + must be at least nine characters and contain at least two + uppercase, two lowercase, two numbers, and two special characters. + The special characters must be _, \#, or -. + type: string + pdbName: + description: The name for the pluggable database (PDB). The + name is unique in the context of a Database. The name must + begin with an alphabetic character and can contain a maximum + of thirty alphanumeric characters. Special characters are + not permitted. 
The pluggable database name should not be same + as the container database name. + type: string + pluggableDatabaseId: + description: The OCID of the PDB for deletion purposes. + type: string + shouldPdbAdminAccountBeLocked: + description: // The locked mode of the pluggable database admin + account. If false, the user needs to provide the PDB Admin + Password to connect to it. // If true, the pluggable database + will be locked and user cannot login to it. + type: boolean + tdeWalletPassword: + description: // The existing TDE wallet password of the CDB. + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + description: DbcsSystemStatus defines the observed state of DbcsSystem + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + description: DbCloneStatus defines the observed state of DbClone + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + description: DbcsSystemStatus defines the observed state of DbcsSystem + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: 
+ type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + required: + - freeformTags + - pdbName + - shouldPdbAdminAccountBeLocked + type: object + type: array + type: object + type: array + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomouscontainerdatabases.yaml b/config/database.oracle.com_autonomouscontainerdatabases.yaml new file mode 100644 index 00000000..bac3a28c --- /dev/null +++ b/config/database.oracle.com_autonomouscontainerdatabases.yaml @@ -0,0 +1,117 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + 
creationTimestamp: null + name: autonomouscontainerdatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousContainerDatabase + listKind: AutonomousContainerDatabaseList + plural: autonomouscontainerdatabases + shortNames: + - acd + - acds + singular: autonomouscontainerdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: DisplayName + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutonomousContainerDatabaseSpec defines the desired state + of AutonomousContainerDatabase + properties: + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + type: string + autonomousExadataVMClusterOCID: + type: string + compartmentOCID: + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + hardLink: + default: false + type: boolean + ociConfig: + description: "*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + patchModel: + description: 'AutonomousContainerDatabasePatchModelEnum Enum with + underlying type: string' + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS + type: string + type: object + status: + description: AutonomousContainerDatabaseStatus defines the observed state + of AutonomousContainerDatabase + properties: + lifecycleState: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + timeCreated: + type: string + required: + - lifecycleState + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomousdatabasebackups.yaml b/config/database.oracle.com_autonomousdatabasebackups.yaml new file mode 100644 index 00000000..a5c37507 --- /dev/null +++ b/config/database.oracle.com_autonomousdatabasebackups.yaml @@ -0,0 +1,138 @@ + +--- +apiVersion: 
apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: autonomousdatabasebackups.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabaseBackup + listKind: AutonomousDatabaseBackupList + plural: autonomousdatabasebackups + shortNames: + - adbbu + - adbbus + singular: autonomousdatabasebackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.dbDisplayName + name: DB DisplayName + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.timeStarted + name: Started + type: string + - jsonPath: .status.timeEnded + name: Ended + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutonomousDatabaseBackupSpec defines the desired state of + AutonomousDatabaseBackup + properties: + autonomousDatabaseBackupOCID: + type: string + displayName: + type: string + isLongTermBackup: + type: boolean + ociConfig: + description: "*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + retentionPeriodInDays: + type: integer + target: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + k8sADB: + description: "*********************** *\tADB spec ***********************" + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + type: object + status: + description: AutonomousDatabaseBackupStatus defines the observed state + of AutonomousDatabaseBackup + properties: + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + dbDisplayName: + type: string + dbName: + type: string + isAutomatic: + type: boolean + lifecycleState: + description: 'AutonomousDatabaseBackupLifecycleStateEnum Enum with + underlying type: string' + type: string + timeEnded: + type: string + timeStarted: + type: string + type: + description: 'AutonomousDatabaseBackupTypeEnum Enum with underlying + type: string' + type: string + required: + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomousdatabaserestores.yaml 
b/config/database.oracle.com_autonomousdatabaserestores.yaml new file mode 100644 index 00000000..5e9f2c73 --- /dev/null +++ b/config/database.oracle.com_autonomousdatabaserestores.yaml @@ -0,0 +1,138 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: autonomousdatabaserestores.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabaseRestore + listKind: AutonomousDatabaseRestoreList + plural: autonomousdatabaserestores + shortNames: + - adbr + - adbrs + singular: autonomousdatabaserestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: DbName + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutonomousDatabaseRestoreSpec defines the desired state of + AutonomousDatabaseRestore + properties: + ociConfig: + description: "*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + source: + properties: + k8sADBBackup: + description: 'EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO + OWN! NOTE: json tags are required. Any new fields you add must + have json tags for the fields to be serialized.' + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + description: 'The timestamp must follow this format: YYYY-MM-DD + HH:MM:SS GMT' + type: string + type: object + type: object + target: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + k8sADB: + description: "*********************** *\tADB spec ***********************" + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + description: AutonomousDatabaseRestoreStatus defines the observed state + of AutonomousDatabaseRestore + properties: + dbName: + type: string + displayName: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + status: + description: 'WorkRequestStatusEnum Enum with underlying type: string' + type: string + timeAccepted: + type: string + timeEnded: + type: string + timeStarted: + type: string + workRequestOCID: + type: string + required: + - dbName + - displayName + - status + - workRequestOCID + type: object + type: object + served: 
true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomousdatabases.yaml b/config/database.oracle.com_autonomousdatabases.yaml new file mode 100644 index 00000000..f77407f3 --- /dev/null +++ b/config/database.oracle.com_autonomousdatabases.yaml @@ -0,0 +1,324 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: autonomousdatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabase + listKind: AutonomousDatabaseList + plural: autonomousdatabases + shortNames: + - adb + - adbs + singular: autonomousdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.details.displayName + name: Display Name + type: string + - jsonPath: .spec.details.dbName + name: Db Name + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .spec.details.isDedicated + name: Dedicated + type: string + - jsonPath: .spec.details.cpuCoreCount + name: OCPUs + type: integer + - jsonPath: .spec.details.dataStorageSizeInTBs + name: Storage (TB) + type: integer + - jsonPath: .spec.details.dbWorkload + name: Workload Type + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousDatabase is the Schema for the autonomousdatabases + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'AutonomousDatabaseSpec defines the desired state of AutonomousDatabase + Important: Run "make" to regenerate code after modifying this file' + properties: + details: + description: AutonomousDatabaseDetails defines the detail information + of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase + properties: + adminPassword: + properties: + k8sSecret: + description: "*********************** *\tSecret specs ***********************" + properties: + name: + type: string + type: object + ociSecret: + properties: + ocid: + type: string + type: object + type: object + autonomousContainerDatabase: + description: ACDSpec defines the spec of the target for backup/restore + runs. 
The name could be the name of an AutonomousDatabase or + an AutonomousDatabaseBackup + properties: + k8sACD: + description: "*********************** *\tACD specs ***********************" + properties: + name: + type: string + type: object + ociACD: + properties: + ocid: + type: string + type: object + type: object + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying + type: string' + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + licenseModel: + description: 'AutonomousDatabaseLicenseModelEnum Enum with underlying + type: string' + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + lifecycleState: + description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying + type: string' + type: string + networkAccess: + properties: + accessControlList: + items: + type: string + type: array + accessType: + enum: + - "" + - PUBLIC + - RESTRICTED + - PRIVATE + type: string + isAccessControlEnabled: + type: boolean + isMTLSConnectionRequired: + type: boolean + privateEndpoint: + properties: + hostnamePrefix: + type: string + nsgOCIDs: + items: + type: string + type: array + subnetOCID: + type: string + type: object + type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + description: "*********************** *\tSecret specs + ***********************" + properties: + name: + type: string + type: object + ociSecret: + properties: + ocid: + type: string + type: object + type: object + type: object + type: object + hardLink: + default: false + type: boolean + ociConfig: + description: 
"*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + required: + - details + type: object + status: + description: AutonomousDatabaseStatus defines the observed state of AutonomousDatabase + properties: + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: object + type: array + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lifecycleState: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + timeCreated: + type: string + walletExpiringDate: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_cdbs.yaml b/config/database.oracle.com_cdbs.yaml new file mode 100644 index 00000000..6b1c350c --- /dev/null +++ b/config/database.oracle.com_cdbs.yaml @@ -0,0 +1,270 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: cdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: CDB + listKind: CDBList + plural: cdbs + singular: cdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - 
description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: CDB is the Schema for the cdbs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CDBSpec defines the desired state of CDB + properties: + cdbAdminPwd: + description: Password for the CDB Administrator to manage PDB lifecycle + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + description: User in the root container with sysdba priviledges to + manage PDB lifecycle + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + description: Name of the CDB + type: string + cdbTlsCrt: + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + description: CDBSecret defines the 
secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + description: DB server port + type: integer + dbServer: + description: Name of the DB server + type: string + dbTnsurl: + type: string + nodeSelector: + additionalProperties: + type: string + description: Node Selector for running the Pod + type: object + ordsImage: + description: ORDS Image Name + type: string + ordsImagePullPolicy: + description: ORDS Image Pull Policy + enum: + - Always + - Never + type: string + ordsImagePullSecret: + description: The name of the image pull secret in case of a private + docker repository. + type: string + ordsPort: + description: ORDS server port. For now, keep it as 8888. TO BE USED + IN FUTURE RELEASE. + type: integer + ordsPwd: + description: Password for user ORDS_PUBLIC_USER + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + replicas: + description: Number of ORDS Containers to create + type: integer + serviceName: + description: Name of the CDB Service + type: string + sysAdminPwd: + description: Password for the CDB System Administrator + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + description: Password for the Web Server User + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + description: Web Server User with SQL Administrator role to allow + us to authenticate to the PDB Lifecycle Management 
REST endpoints + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + description: CDBStatus defines the observed state of CDB + properties: + msg: + description: Message + type: string + phase: + description: Phase of the CDB Resource + type: string + status: + description: CDB Resource Status + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_dataguardbrokers.yaml b/config/database.oracle.com_dataguardbrokers.yaml new file mode 100644 index 00000000..f19a3e22 --- /dev/null +++ b/config/database.oracle.com_dataguardbrokers.yaml @@ -0,0 +1,134 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: dataguardbrokers.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DataguardBroker + listKind: DataguardBrokerList + plural: dataguardbrokers + singular: dataguardbroker + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + - jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + 
description: DataguardBroker is the Schema for the dataguardbrokers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DataguardBrokerSpec defines the desired state of DataguardBroker + properties: + fastStartFailOver: + properties: + enable: + type: boolean + strategy: + items: + description: FSFO strategy + properties: + sourceDatabaseRef: + type: string + targetDatabaseRefs: + type: string + type: object + type: array + type: object + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + description: DataguardBrokerStatus defines the observed state of DataguardBroker + properties: + clusterConnectString: + type: string + externalConnectString: + type: string + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object 
+ served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_oraclerestdataservices.yaml b/config/database.oracle.com_oraclerestdataservices.yaml new file mode 100644 index 00000000..121383fd --- /dev/null +++ b/config/database.oracle.com_oraclerestdataservices.yaml @@ -0,0 +1,224 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: oraclerestdataservices.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OracleRestDataService + listKind: OracleRestDataServiceList + plural: oraclerestdataservices + singular: oraclerestdataservice + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: OracleRestDataService is the Schema for the oraclerestdataservices + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OracleRestDataServiceSpec defines the desired state of OracleRestDataService + properties: + adminPassword: + description: OracleRestDataServicePassword defines the secret containing + Password mapped to secretKey + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + apexPassword: + description: OracleRestDataServicePassword defines the secret containing + Password mapped to secretKey + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + description: OracleRestDataServiceImage defines the Image source and + pullSecrets for POD + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + description: OracleRestDataServicePassword defines the secret containing + Password mapped to secretKey + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + description: OracleRestDataServicePersistence defines the storage + releated params + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + description: OracleRestDataServicePDBSchemas defines the PDB Schemas + to be ORDS 
Enabled + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + description: OracleRestDataServiceStatus defines the observed state of + OracleRestDataService + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: + type: string + image: + description: OracleRestDataServiceImage defines the Image source and + pullSecrets for POD + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_pdbs.yaml b/config/database.oracle.com_pdbs.yaml new file mode 100644 index 00000000..85af8c1b --- /dev/null +++ b/config/database.oracle.com_pdbs.yaml @@ -0,0 +1,383 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: pdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: PDB + listKind: PDBList + plural: pdbs + singular: pdb + scope: Namespaced + versions: + - 
additionalPrinterColumns: + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: PDB is the Schema for the pdbs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PDBSpec defines the desired state of PDB + properties: + action: + description: 'Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map. + Map is used to map a Databse PDB to a Kubernetes PDB CR.' + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + description: The administrator username for the new PDB. This property + is required when the Action property is Create. 
+ properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + description: The administrator password for the new PDB. This property + is required when the Action property is Create. + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + description: Indicate if 'AS CLONE' option should be used in the command + to plug in a PDB. This property is applicable when the Action property + is PLUG but not required. + type: boolean + assertivePdbDeletion: + description: turn on the assertive approach to delete pdb resource + kubectl delete pdb ..... automatically triggers the pluggable database + deletion + type: boolean + cdbName: + description: Name of the CDB + type: string + cdbNamespace: + description: CDB Namespace + type: string + cdbResName: + description: Name of the CDB Custom Resource that runs the ORDS container + type: string + copyAction: + description: To copy files or not while cloning a PDB + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + description: Specify if datafiles should be removed or not. The value + can be INCLUDING or KEEP (default). + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + description: Relevant for Create and Plug operations. As defined in + the Oracle Multitenant Database documentation. Values can be a + filename convert pattern or NONE. 
+ type: string + getScript: + description: Whether you need the script only or execute the script + type: boolean + modifyOption: + description: Extra options for opening and closing a PDB + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + description: The name of the new PDB. Relevant for both Create and + Plug Actions. + type: string + pdbState: + description: The target state of the PDB + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + description: Whether to reuse temp file + type: boolean + sourceFileNameConversions: + description: This property is required when the Action property is + Plug. As defined in the Oracle Multitenant Database documentation. + Values can be a source filename convert pattern or NONE. + type: string + sparseClonePath: + description: A Path specified for sparse clone snapshot copy. (Optional) + type: string + srcPdbName: + description: Name of the Source PDB from which to clone + type: string + tdeExport: + description: TDE export for unplug operations + type: boolean + tdeImport: + description: TDE import for plug operations + type: boolean + tdeKeystorePath: + description: TDE keystore path is required if the tdeImport or tdeExport + flag is set to true. 
Can be used in plug or unplug operations. + type: string + tdePassword: + description: TDE password if the tdeImport or tdeExport flag is set + to true. Can be used in create, plug or unplug operations + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + description: TDE secret is required if the tdeImport or tdeExport + flag is set to true. Can be used in plug or unplug operations. + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + description: Relevant for Create and Clone operations. Total size + for temporary tablespace as defined in the Oracle Multitenant Database + documentation. See size_clause description in Database SQL Language + Reference documentation. + type: string + totalSize: + description: Relevant for create and plug operations. Total size as + defined in the Oracle Multitenant Database documentation. See size_clause + description in Database SQL Language Reference documentation. + type: string + unlimitedStorage: + description: Relevant for Create and Plug operations. True for unlimited + storage. Even when set to true, totalSize and tempSize MUST be specified + in the request if Action is Create. 
+ type: boolean + webServerPwd: + description: Password for the Web ServerPDB User + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + description: Web Server User with SQL Administrator role to allow + us to authenticate to the PDB Lifecycle Management REST endpoints + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + description: XML metadata filename to be used for Plug or Unplug operations + type: string + required: + - action + type: object + status: + description: PDBStatus defines the observed state of PDB + properties: + action: + description: Last Completed Action + type: string + connString: + description: PDB Connect String + type: string + modifyOption: + description: Modify Option of the PDB + type: string + msg: + description: Message + type: string + openMode: + description: Open mode of the PDB + type: string + phase: + description: Phase of the PDB Resource + type: string + status: + description: PDB Resource Status + type: boolean + totalSize: + description: Total size of the PDB + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_shardingdatabases.yaml b/config/database.oracle.com_shardingdatabases.yaml new file mode 100644 index 00000000..bb9bbd38 --- /dev/null +++ b/config/database.oracle.com_shardingdatabases.yaml @@ -0,0 +1,688 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + 
controller-gen.kubebuilder.io/version: v0.16.5 + creationTimestamp: null + name: shardingdatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: ShardingDatabase + listKind: ShardingDatabaseList + plural: shardingdatabases + singular: shardingdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.gsm.state + name: Gsm State + type: string + - jsonPath: .status.gsm.services + name: Services + type: string + - jsonPath: .status.gsm.shards + name: shards + priority: 1 + type: string + name: v4 + schema: + openAPIV3Schema: + description: ShardingDatabase is the Schema for the shardingdatabases API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ShardingDatabaseSpec defines the desired state of ShardingDatabase + properties: + InvitedNodeSubnet: + type: string + catalog: + items: + description: CatalogSpec defines the desired state of CatalogSpec + properties: + envVars: + items: + description: EnvironmentVariable represents a named variable + accessible for containers. 
+ properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + dbEdition: + type: string + dbImage: + type: string + dbImagePullSecret: + type: string + dbSecret: + description: Secret Details + properties: + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string + required: + - name + - pwdFileName + type: object + fssStorageClass: + type: string + gsm: + items: + description: GsmSpec defines the desired state of GsmSpec + properties: + directorName: + type: string + envVars: + description: Replicas int32 `json:"replicas,omitempty"` // + Gsm Replicas. If you set OraGsmPvcName then it is set default + to 1. + items: + description: EnvironmentVariable represents a named variable + accessible for containers. 
+ properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + region: + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + gsmDevMode: + type: string + gsmImage: + type: string + gsmImagePullSecret: + type: string + gsmService: + items: + description: Service Definition + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string + pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: 
string + required: + - name + type: object + type: array + gsmShardSpace: + items: + description: ShardSpace Specs + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: + type: string + isClone: + type: boolean + isDataGuard: + type: boolean + isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: + type: string + liveinessCheckPeriod: + type: integer + namespace: + type: string + portMappings: + items: + description: PortMapping is a specification of port mapping for + an application deployment. + properties: + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + items: + description: ShardSpec is a specification of Shards for an application + deployment. + properties: + deployAs: + type: string + envVars: + items: + description: EnvironmentVariable represents a named variable + accessible for containers. 
+ properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + description: To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 + ShardingDatabaseStatus defines the observed state of ShardingDatabase + properties: + catalogs: + additionalProperties: + type: string + type: object + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. 
\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: + properties: + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string + type: object + state: + type: string + type: object + shards: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_singleinstancedatabases.yaml b/config/database.oracle.com_singleinstancedatabases.yaml new file mode 100644 index 00000000..1c011e17 --- /dev/null +++ b/config/database.oracle.com_singleinstancedatabases.yaml @@ -0,0 +1,421 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: singleinstancedatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: SingleInstanceDatabase + listKind: SingleInstanceDatabaseList + plural: singleinstancedatabases + singular: singleinstancedatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.edition + name: Edition + type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: 
string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.role + name: Role + type: string + - jsonPath: .status.releaseUpdate + name: Version + type: string + - jsonPath: .status.connectString + name: Connect Str + type: string + - jsonPath: .status.pdbConnectString + name: Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.oemExpressUrl + name: Oem Express Url + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: SingleInstanceDatabase is the Schema for the singleinstancedatabases + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase + properties: + adminPassword: + description: SingleInsatnceAdminPassword defines the secret containing + Admin Password mapped to secretKey for Database + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + archiveLog: + type: boolean + charset: + type: string + createAs: + enum: + - primary + - standby + - clone + type: string + dgBrokerConfigured: + type: boolean + edition: + enum: + - standard + - enterprise + - express + - free + type: string + enableTCPS: + type: boolean + flashBack: + type: boolean + forceLog: + type: boolean + image: + description: SingleInstanceDatabaseImage defines the Image source + and pullSecrets for POD + properties: + prebuiltDB: + type: boolean + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + initParams: + description: SingleInstanceDatabaseInitParams defines the Init Parameters + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + listenerPort: + type: integer + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + pdbName: + type: string + persistence: + description: SingleInstanceDatabasePersistence defines the storage + size and class for PVC + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + 
primaryDatabaseRef: + type: string + readinessCheckPeriod: + type: integer + replicas: + type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + sid: + description: SID must be alphanumeric (no special characters, only + a-z, A-Z, 0-9), and no longer than 12 characters. + maxLength: 12 + pattern: ^[a-zA-Z0-9]+$ + type: string + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + required: + - image + type: object + status: + description: SingleInstanceDatabaseStatus defines the observed state of + SingleInstanceDatabase + properties: + apexInstalled: + type: boolean + archiveLog: + type: string + certCreationTimestamp: + type: string + certRenewInterval: + type: string + charset: + type: string + clientWalletLoc: + type: string + clusterConnectString: + type: string + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. 
This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectString: + type: string + createdAs: + type: string + datafilesCreated: + default: "false" + type: string + datafilesPatched: + default: "false" + type: string + dgBrokerConfigured: + type: boolean + edition: + type: string + flashBack: + type: string + forceLog: + type: string + initParams: + description: SingleInstanceDatabaseInitParams defines the Init Parameters + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + initPgaSize: + type: integer + initSgaSize: + type: integer + isTcpsEnabled: + default: false + type: boolean + nodes: + items: + type: string + type: array + oemExpressUrl: + type: string + ordsReference: + type: string + pdbConnectString: + type: string + pdbName: + type: string + persistence: + description: SingleInstanceDatabasePersistence defines the storage + size and class for PVC + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + prebuiltDB: + type: boolean + primaryDatabase: + type: string + releaseUpdate: + type: string + replicas: + type: integer + role: + type: string + sid: + type: string + standbyDatabases: + additionalProperties: + type: string + type: object + status: + type: string + tcpsConnectString: + type: string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: 
"" + type: string + required: + - isTcpsEnabled + - persistence + - tcpsTlsSecret + type: object + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 04f37b2c..d41001b0 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index c88bb7a7..9bd3bbc9 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml index cda36c4c..b34405d2 100644 --- a/config/default/manager_webhook_patch.yaml +++ b/config/default/manager_webhook_patch.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# apiVersion: apps/v1 diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml index 51b1069f..c6b7ea34 100644 --- a/config/default/webhookcainjection_patch.yaml +++ b/config/default/webhookcainjection_patch.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 30ed1f75..7a52fb17 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # resources: @@ -9,4 +9,4 @@ kind: Kustomization images: - name: controller newName: container-registry.oracle.com/database/operator - newTag: 0.1.0 + newTag: 1.2.0 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 362aac61..54340faf 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# apiVersion: v1 @@ -26,8 +26,6 @@ spec: labels: control-plane: controller-manager spec: - imagePullSecrets: - - name: container-registry-secret containers: - command: - /manager @@ -43,4 +41,7 @@ spec: requests: cpu: 400m memory: 400Mi + env: + - name : WATCH_NAMESPACE + value : "" terminationGracePeriodSeconds: 10 diff --git a/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml b/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml index 6ec37dd1..933a2bfa 100644 --- a/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml @@ -3,25 +3,98 @@ kind: ClusterServiceVersion metadata: annotations: alm-examples: '[]' - capabilities: Basic Install + capabilities: Seamless Upgrades operators.operatorframework.io/builder: operator-sdk-v1.2.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v2 - name: oracle-database-operator.v0.0.0 - namespace: placeholder + name: oracle-database-operator.v1.1.0 + namespace: oracle-database-operator-system spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - description: DbcsSystem is the Schema for the dbcssystems API + displayName: Dbcs System + kind: DbcsSystem + name: DbcsSystem.database.oracle.com + version: v4 + - description: AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases + API + displayName: Autonomous Container Database + kind: AutonomousContainerDatabase + name: autonomouscontainerdatabases.database.oracle.com + version: v1alpha1 + - description: AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups + API + displayName: Autonomous Database Backup + kind: AutonomousDatabaseBackup + name: autonomousdatabasebackups.database.oracle.com + version: v1alpha1 + - description: AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores + API + displayName: Autonomous Database Restore + kind: 
AutonomousDatabaseRestore + name: autonomousdatabaserestores.database.oracle.com + version: v1alpha1 - description: AutonomousDatabase is the Schema for the autonomousdatabases API displayName: Autonomous Database kind: AutonomousDatabase name: autonomousdatabases.database.oracle.com version: v1alpha1 - description: Operator to manage Oracle sharding - displayName: Oracle Sharding DB Operator + - description: CDB is the Schema for the cdbs API + displayName: CDB + kind: CDB + name: cdbs.database.oracle.com + version: v1alpha1 + - description: DatabaseObserver is the Schema for the databaseobservers API + displayName: Database Observer + kind: DatabaseObserver + name: databaseobservers.observability.oracle.com + version: v1alpha1 + - description: DataguardBroker is the Schema for the dataguardbrokers API + displayName: Dataguard Broker + kind: DataguardBroker + name: dataguardbrokers.database.oracle.com + version: v1alpha1 + - description: OracleRestDataService is the Schema for the oraclerestdataservices + API + displayName: Oracle Rest Data Service + kind: OracleRestDataService + name: oraclerestdataservices.database.oracle.com + version: v1alpha1 + - description: PDB is the Schema for the pdbs API + displayName: PDB + kind: PDB + name: pdbs.database.oracle.com + version: v1alpha1 + - description: ShardingDatabase is the Schema for the shardingdatabases API + displayName: Sharding Database + kind: ShardingDatabase + name: shardingdatabases.database.oracle.com + version: v4 + - description: SingleInstanceDatabase is the Schema for the singleinstancedatabases + API + displayName: Single Instance Database + kind: SingleInstanceDatabase + name: singleinstancedatabases.database.oracle.com + version: v1alpha1 + description: | + As part of Oracle's resolution to make Oracle Database Kubernetes native (that is, observable and operable by Kubernetes), Oracle released Oracle Database Operator for Kubernetes (OraOperator or the operator). 
OraOperator extends the Kubernetes API with custom resources and controllers for automating Oracle Database lifecycle management. + In this v1.1.0 production release, OraOperator supports the following database configurations and infrastructure: + ## Oracle Autonomous Database: + * Oracle Autonomous Database shared Oracle Cloud Infrastructure (OCI) (ADB-S) + * Oracle Autonomous Database on dedicated Cloud infrastructure (ADB-D) + * Oracle Autonomous Container Database (ACD) (infrastructure) is the infrastructure for provisioning Autonomous Databases. + * Containerized Single Instance databases (SIDB) deployed in the Oracle Kubernetes Engine (OKE) and any k8s where OraOperator is deployed + * Containerized Sharded databases (SHARDED) deployed in OKE and any k8s where OraOperator is deployed + * Oracle Multitenant Databases (CDB/PDBs) + * Oracle Base Database Cloud Service (BDBCS) + * Oracle Data Guard (Preview status) + * Oracle Database Observability (Preview status) + * Oracle will continue to extend OraOperator to support additional Oracle Database configurations. 
+ displayName: Oracle Database Operator icon: - - base64data: "" - mediatype: "" + - base64data: iVBORw0KGgoAAAANSUhEUgAAALQAAAC0CAYAAAA9zQYyAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAABmJLR0QAAAAAAAD5Q7t/AAAJjUlEQVR42u3cfcwcRQHH8S9PH0BokZfCVBgpOgjyFjRoQIQQkLeA0PLWqgQMFDVgja9AChIKKCEKSgQEQVsQJGKxtNCAvAi2vJiCqAQMUpQRMKM4vFiCQEUo/jH7kOt19m7vbveK8fdJLukzMzuzczc7OzszWxAREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREZH/X2tVSRStmwi8B5gErN1nWS8DAVhmgl9ZsdwpVc+xoteKc/iTCf7VujKN1o0A+xef5cDPTfCP1XjeY+VsAWwFTATGDZjdPSb4F6J1U9sjTPA31n3uXeq1MfBe4F30376ADo0lWjcBOAa4EHhHzXW4FzgDuNsE/2aHc3gJmFBz2WMuBc4ywT87SCbRunWAB4APtEV92gR/zaAnGa2bBJwInFVz/SeY4F+O1q32/Zvg6+xEyuq1PnA08F1gg7ryHSkpbDrwEnAZ9TdmgD2BxcDj0bptG8i/is8DMVq394D5zGb1xgxwdbRum34zjdaNi9adAjxD/Y15PxP8yzXn2UvdDiPdsX9IjY0ZOHOVKzFaNw64Apgx5DoelrvNNdxDt9rDBP/rXg+K1u0EPNIhyWPADp3uQiX5bggsIX+hDOp6E/z0lrKG1kMXQ7OLgJkNZL8SWO+tHjpatxbwM4bfmAEWRuuOWAPljrmvGGJVVlz8d3ZJth1wXI/5jgeW0UxjhnRnGrqifV1FM40ZYDcT/GujLQGnAUd2OGAu6Qd8oY/CxgGTgWOB3UvSzI/W7WiCf7RLXgcNUOnJwOUlcScCF/SQ10mAqZBubrTuVhP837sljNZB6lQmdUh2AbCUdMvu1Ssm+Of6OK4OXyb9/mV+DNxOf+0LE/yDUDwURuu2A/5YknYWcLEJ/pU6ahWt2wq4FtgjE/08YMZmQUqGHHua4O8boPx1gNuAvTPRI1WGB0UdnsxEPQzsnAm/HTjQBN8t36nAwpLoTwDzTfBv9Fv3kjIbH3JE6xzwREn0bOA7dY3px4Ycl5bEH2CC/1ZdjRnABP8UsBfwg0z0RODgusoqKf810uxNzsbdjm/pRXOOIP1A7Q4ADu2S7yjljXl7E/y8uhvzEF1YEj7FBH9OnQ+oI9G6zYF9MnEzTfB3NFG7ogf+ApDrsr7XRJlt/lYSvmGFY6cDu2XCZ5ngnwC+XXLcTcXDXpmyodheTcxpD0u0bjNgSibqZBP8orrLGyH1HjlXNFnRorc5KhPlioWcJo2WhL/e6aBo3abke+eVFL2QCX4F+eEMpDnXMsdnwu4ywd/T8HfRtLLv4pImChsFDsmEn2aCf73XzPrwUEn4+4Gep9F68LGS8G4PTN8vCf9oMZQBwAS/JFp3PTCtLd2MaN3ckmeAXIP+ZoPfwbCUDSFXFMO3Om03QlrkaLd4GDUtHsByPd7WTZUZrdsLuDUTdX+n5fBo3b6k4Ua7K0zw92fCy6an7o3WVV2seqRiurez3QfPopI5Jvhlo+SniPqaOunT05mwjmPZaN2WwKPAmy0fOvw99u/NOmR7aofyxgO/7OU4E/yz0brjSHOv7c4oPt3U9jC+Bk0aPItKvgZpDJ27zVZ5OKrLFpmwlzodYIL/K2k+egPgncX5bghsRJqp2KT4TAQ2LT6dGvONJvi7O8R/oyR
8qgn+xQ7HXUP+wffrxSpjN+v195W+rQxj3vuQsd9hhDRJ326P3vLrTzGGOjoT9Zdux5rg7yWtxA3qsZJzGDvHXYCvZKIWAzd1OceVlE/X3VmsNnayfQ31W9MeaDj/W0zwN4/9MQosYvUv/YJo3UVVt3kOYAfyO/4qTVOZ4JcVu9EeAjbvo/xLSNNH/85FRuvWBsp67guBKRUfbM4jrcSucvqk1caxp/3rgE+2pfkqaWfi/7JfkO8w1m5i4mGUtGrWbhzwKdKKXiOKtf0rM1HPAbFqPib4GK17H2lRYv8Kh7xIWkj6kQned0n7JWB8JnwmaSfiFgzm4mjdomKxaQ6rN+jDo3UfMsH/dsBy1qSy/S4zaGBqeMQE/zTwu0zcT6J1u/WaYRVFr3YOsGsmema3JeJ2xUrmQcDFHZKdU9R3IxP86d0ac7Rua+D8XBRpP0iVi6eKecXFXXYneDBaN7mmsoau2MOyJBN1eTHjVKuxpe/PlMQvjdadUGGsV1mxcjSP8qf8Bf3ka4J/wwT/RdImmJwzSbv6uj5oFQ1sYUn0fkVZjwJn1/CV7ApM77Ik/1S0rq4LaE0o2+G3JFp3UjG0q8Vb49do3fnAyR3SngvcQ3rFqFfjAEtaaJjWId2uJvjftJxTX5uTonWHAzeURQMf7LT7LVp3LHB1Jup8E/ypLenWBVb08X3kbEbanLWU/J0L0uzP2aQ76qBTeq+a4B/ObU4CPlJTnVodTOpUypxH6smXD1JIa4MeBX5FfqFlGGaY4FcZUw+y264YLi3tkGQXE/zvM8dNIr0lkjO+faNWtG5n0sXai1syYfNN8EdF6zYhNeymHWmCv6GkQdftKtIo4GbgwCYLan9jZV1gPvDxIVSy1fEm+KvaAwfdPlqMg//cIcnhJviFbccsIr8dYD8TfLcN/ZVE6y4HPpeJOsAEf0dxUf2BNH/ehMXAPiZ4htSgNzbBLy+27l5Lfg9PLVZ5p7CYvjqU4b3V8B/gw7nGXIdi99umlO/FXRCtmzU29RatO4h8Y15QV2MuzCoJvz1aN8EE/w9gS3p74aAXx/T64D2AKSb45fDW1t1plD+zDWrf1V6SNcG/aYK/jDSmm917npU8Tdo7vH7TU1Im+OeBnShfBDkP+Gm0bgPyQwFIb7PUeU7/pHwx59wizQoT/CmAI793vF+fNcGHlr+P7zun7m4jrXO01h0T/BzSKu7pNZZ1nQn+rqr/L8dkYFvSvGu/T6T/Ap4CHjfBV9orEq07IRN8pwn+yV4LL17Q7PTj3UV+F95zTfw/FcVMSvb9zeIHz53/1sA2pEWZfmeermxfMIvW7Uh6EB3pL8tSN5vgn+mWqNibsw3wbvpvXwuqtisREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREWnxX2ox1/vZSvwPAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDI0LTA4LTEzVDE5OjUyOjMxKzAwOjAwsDIMcAAAACV0RVh0ZGF0ZTptb2RpZnkAMjAyNC0wOC0xM1QxOTo1MjozMSswMDowMMFvtMwAAABVdEVYdHN2Zzpjb21tZW50ACBVcGxvYWRlZCB0bzogU1ZHIFJlcG8sIHd3dy5zdmdyZXBvLmNvbSwgR2VuZXJhdG9yOiBTVkcgUmVwbyBNaXhlciBUb29scyBFB1wTAAAAAElFTkSuQmCC + mediatype: png install: spec: deployments: null @@ -37,12 +110,12 @@ spec: type: AllNamespaces keywords: - Oracle - - sharding - - db + - Database + - Operator links: - name: Oracle Database Operator 
- url: https://oracle-database-operator.domain + url: https://github.com/oracle/oracle-database-operator maturity: alpha provider: - name: ShardingDatabase - version: 0.0.0 + name: Oracle + version: 1.2.0 diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml index 2a0f628b..39275249 100644 --- a/config/manifests/kustomization.yaml +++ b/config/manifests/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # resources: diff --git a/config/observability.oracle.com_databaseobservers.yaml b/config/observability.oracle.com_databaseobservers.yaml new file mode 100644 index 00000000..c69a3b99 --- /dev/null +++ b/config/observability.oracle.com_databaseobservers.yaml @@ -0,0 +1,9539 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: databaseobservers.observability.oracle.com +spec: + group: observability.oracle.com + names: + kind: DatabaseObserver + listKind: DatabaseObserverList + plural: databaseobservers + singular: databaseobserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + description: DatabaseObserver is the Schema for the databaseobservers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseObserverSpec defines the desired state of DatabaseObserver + properties: + configuration: + properties: + configMap: + description: ConfigMapDetails defines the configmap name + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + description: DatabaseObserverDatabase defines the database details + used for DatabaseObserver + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbRole: + type: string + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + description: DatabaseObserverExporterConfig defines the configuration + details related to the exporters of DatabaseObserver + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + image: + type: string + service: + description: DatabaseObserverService defines the exporter service + component of DatabaseObserver + properties: + port: + format: int32 + type: integer + type: object + type: object + inherit_labels: + items: + type: string + type: array + log: + description: LogConfig defines the configuration details relation + to the logs of DatabaseObserver 
+ properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + description: PrometheusConfig defines the generated resources for + Prometheus + properties: + port: + type: string + release: + type: string + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). 
+ properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. 
The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to be updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster.
\n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. 
If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. 
More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this + claim. If specified, the CSI driver will create + or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This + has a different purpose than storageClassName, + it can be changed after the claim is created. + An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed + to reset this field to empty string once it is + set. If unspecified and the PersistentVolumeClaim + is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller + if it exists. If the resource referred to by volumeAttributesClass + does not exist, this PersistentVolumeClaim will + be set to a Pending state, as reflected by the + modifyVolumeStatus field, until such as a resource + exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access + the `.spec.trustBundle` field of ClusterTrustBundle + objects in an auto-updating file. \n Alpha, gated + by the ClusterTrustBundleProjection feature gate. + \n ClusterTrustBundle objects can either be selected + by name, or by the combination of signer name and + a label selector. 
\n Kubelet performs aggressive + normalization of the PEM contents written into the + pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates + are deduplicated. The ordering of certificates within + the file is arbitrary, and Kubelet may change the + order over time." + properties: + labelSelector: + description: Select all ClusterTrustBundles that + match this label selector. Only has effect + if signerName is set. Mutually-exclusive with + name. If unset, interpreted as "match nothing". If + set but empty, interpreted as "match everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. 
Mutually-exclusive with signerName + and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) aren't + available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, + then the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles that + match this signer name. Mutually-exclusive with + name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. 
This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. 
+ Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. 
+ format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. 
More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. 
More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. 
+ properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. 
TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. 
It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. 
This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. 
If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + type: object + status: + description: DatabaseObserverStatus defines the observed state of DatabaseObserver + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: DatabaseObserver is the Schema for the databaseobservers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseObserverSpec defines the desired state of DatabaseObserver + properties: + configuration: + properties: + configMap: + description: ConfigMapDetails defines the configmap name + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + description: DatabaseObserverDatabase defines the database details + used for DatabaseObserver + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbRole: + type: string + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + description: DatabaseObserverExporterConfig defines the configuration + details related to the exporters of DatabaseObserver + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + image: + type: string + service: + description: DatabaseObserverService defines the exporter service + component of DatabaseObserver + properties: + port: + format: int32 + type: integer + type: object + type: object + inherit_labels: + items: + type: string + type: array + log: + description: LogConfig defines the configuration details relation + to the logs of DatabaseObserver + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + 
prometheus: + description: PrometheusConfig defines the generated resources for + Prometheus + properties: + port: + type: string + release: + type: string + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). 
+ properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. 
The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. 
\n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. 
If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. 
More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this + claim. If specified, the CSI driver will create + or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This + has a different purpose than storageClassName, + it can be changed after the claim is created. + An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed + to reset this field to empty string once it is + set. If unspecified and the PersistentVolumeClaim + is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller + if it exists. If the resource referred to by volumeAttributesClass + does not exist, this PersistentVolumeClaim will + be set to a Pending state, as reflected by the + modifyVolumeStatus field, until such as a resource + exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access + the `.spec.trustBundle` field of ClusterTrustBundle + objects in an auto-updating file. \n Alpha, gated + by the ClusterTrustBundleProjection feature gate. + \n ClusterTrustBundle objects can either be selected + by name, or by the combination of signer name and + a label selector. 
\n Kubelet performs aggressive + normalization of the PEM contents written into the + pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates + are deduplicated. The ordering of certificates within + the file is arbitrary, and Kubelet may change the + order over time." + properties: + labelSelector: + description: Select all ClusterTrustBundles that + match this label selector. Only has effect + if signerName is set. Mutually-exclusive with + name. If unset, interpreted as "match nothing". If + set but empty, interpreted as "match everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. 
Mutually-exclusive with signerName + and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) aren't + available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, + then the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles that + match this signer name. Mutually-exclusive with + name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. 
This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. 
+ Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. 
+ format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. 
More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. 
More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. 
+ properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. 
TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. 
It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. 
This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. 
If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + type: object + status: + description: DatabaseObserverStatus defines the observed state of DatabaseObserver + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v4 + schema: + openAPIV3Schema: + description: DatabaseObserver is the Schema for the databaseobservers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseObserverSpec defines the desired state of DatabaseObserver + properties: + configuration: + properties: + configMap: + description: ConfigMapDetails defines the configmap name + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + description: DatabaseObserverDatabase defines the database details + used for DatabaseObserver + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbRole: + type: string + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + description: DatabaseObserverExporterConfig defines the configuration + details related to the exporters of DatabaseObserver + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + image: + type: string + service: + description: DatabaseObserverService defines the exporter service + component of DatabaseObserver + properties: + port: + format: int32 + type: integer + type: object + type: object + inherit_labels: + items: + type: string + type: array + log: + description: LogConfig defines the configuration details relation + to the logs of DatabaseObserver + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + 
prometheus: + description: PrometheusConfig defines the generated resources for + Prometheus + properties: + port: + type: string + release: + type: string + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). 
+ properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. 
The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. 
\n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. 
If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. 
More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this + claim. If specified, the CSI driver will create + or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This + has a different purpose than storageClassName, + it can be changed after the claim is created. + An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed + to reset this field to empty string once it is + set. If unspecified and the PersistentVolumeClaim + is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller + if it exists. If the resource referred to by volumeAttributesClass + does not exist, this PersistentVolumeClaim will + be set to a Pending state, as reflected by the + modifyVolumeStatus field, until such as a resource + exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access + the `.spec.trustBundle` field of ClusterTrustBundle + objects in an auto-updating file. \n Alpha, gated + by the ClusterTrustBundleProjection feature gate. + \n ClusterTrustBundle objects can either be selected + by name, or by the combination of signer name and + a label selector. 
\n Kubelet performs aggressive + normalization of the PEM contents written into the + pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates + are deduplicated. The ordering of certificates within + the file is arbitrary, and Kubelet may change the + order over time." + properties: + labelSelector: + description: Select all ClusterTrustBundles that + match this label selector. Only has effect + if signerName is set. Mutually-exclusive with + name. If unset, interpreted as "match nothing". If + set but empty, interpreted as "match everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. 
Mutually-exclusive with signerName + and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) aren't + available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, + then the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles that + match this signer name. Mutually-exclusive with + name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. 
This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. 
+ Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. 
+ format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. 
More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. 
More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. 
+ properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. 
TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. 
It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. 
This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. 
If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + type: object + status: + description: DatabaseObserverStatus defines the observed state of DatabaseObserver + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml index b3e55375..1a478f0d 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: rbac.authorization.k8s.io/v1 diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml index fd7483d6..e275bf6d 100644 --- a/config/rbac/auth_proxy_role.yaml +++ b/config/rbac/auth_proxy_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: rbac.authorization.k8s.io/v1 diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml index 5632b87b..6a702753 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -1,15 +1,15 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. 
+# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: oracle-database-operator-proxy-rolebinding + name: proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: proxy-role + name: oracle-database-operator-proxy-role subjects: - kind: ServiceAccount name: default diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 72b1ecb8..8cca79bf 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: v1 diff --git a/config/rbac/autonomouscontainerdatabase_editor_role.yaml b/config/rbac/autonomouscontainerdatabase_editor_role.yaml new file mode 100644 index 00000000..8dccd4a5 --- /dev/null +++ b/config/rbac/autonomouscontainerdatabase_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit autonomouscontainerdatabases. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autonomouscontainerdatabase-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - autonomouscontainerdatabases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - autonomouscontainerdatabases/status + verbs: + - get diff --git a/config/rbac/autonomouscontainerdatabase_viewer_role.yaml b/config/rbac/autonomouscontainerdatabase_viewer_role.yaml new file mode 100644 index 00000000..e9bcec50 --- /dev/null +++ b/config/rbac/autonomouscontainerdatabase_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view autonomouscontainerdatabases. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autonomouscontainerdatabase-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - autonomouscontainerdatabases + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - autonomouscontainerdatabases/status + verbs: + - get diff --git a/config/rbac/autonomousdatabase_editor_role.yaml b/config/rbac/autonomousdatabase_editor_role.yaml index bd6172fd..4cc7959a 100644 --- a/config/rbac/autonomousdatabase_editor_role.yaml +++ b/config/rbac/autonomousdatabase_editor_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/rbac/autonomousdatabase_viewer_role.yaml b/config/rbac/autonomousdatabase_viewer_role.yaml index af24143f..089bf01e 100644 --- a/config/rbac/autonomousdatabase_viewer_role.yaml +++ b/config/rbac/autonomousdatabase_viewer_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/rbac/autonomousdatabasebackup_editor_role.yaml b/config/rbac/autonomousdatabasebackup_editor_role.yaml new file mode 100644 index 00000000..1d210196 --- /dev/null +++ b/config/rbac/autonomousdatabasebackup_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit autonomousdatabasebackups. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autonomousdatabasebackup-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabasebackups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabasebackups/status + verbs: + - get diff --git a/config/rbac/autonomousdatabasebackup_viewer_role.yaml b/config/rbac/autonomousdatabasebackup_viewer_role.yaml new file mode 100644 index 00000000..3be0c5cb --- /dev/null +++ b/config/rbac/autonomousdatabasebackup_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view autonomousdatabasebackups. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autonomousdatabasebackup-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabasebackups + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabasebackups/status + verbs: + - get diff --git a/config/rbac/autonomousdatabaserestore_editor_role.yaml b/config/rbac/autonomousdatabaserestore_editor_role.yaml new file mode 100644 index 00000000..6efd98ae --- /dev/null +++ b/config/rbac/autonomousdatabaserestore_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit autonomousdatabaserestores. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autonomousdatabaserestore-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabaserestores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabaserestores/status + verbs: + - get diff --git a/config/rbac/autonomousdatabaserestore_viewer_role.yaml b/config/rbac/autonomousdatabaserestore_viewer_role.yaml new file mode 100644 index 00000000..66cc7f51 --- /dev/null +++ b/config/rbac/autonomousdatabaserestore_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view autonomousdatabaserestores. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autonomousdatabaserestore-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabaserestores + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - autonomousdatabaserestores/status + verbs: + - get diff --git a/config/rbac/cdb_editor_role.yaml b/config/rbac/cdb_editor_role.yaml new file mode 100644 index 00000000..244ddff2 --- /dev/null +++ b/config/rbac/cdb_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit cdbs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cdb-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - cdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - cdbs/status + verbs: + - get diff --git a/config/rbac/cdb_viewer_role.yaml b/config/rbac/cdb_viewer_role.yaml new file mode 100644 index 00000000..78a84283 --- /dev/null +++ b/config/rbac/cdb_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view cdbs. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cdb-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - cdbs + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - cdbs/status + verbs: + - get diff --git a/config/rbac/databaseobserver_editor_role.yaml b/config/rbac/databaseobserver_editor_role.yaml new file mode 100644 index 00000000..900c4b88 --- /dev/null +++ b/config/rbac/databaseobserver_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit databaseobservers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: databaseobserver-editor-role +rules: + - apiGroups: + - observability.oracle.com + resources: + - databaseobservers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - observability.oracle.com + resources: + - databaseobservers/status + verbs: + - get \ No newline at end of file diff --git a/config/rbac/databaseobserver_viewer_role.yaml b/config/rbac/databaseobserver_viewer_role.yaml new file mode 100644 index 00000000..ef447b21 --- /dev/null +++ b/config/rbac/databaseobserver_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view databaseobservers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: databaseobserver-viewer-role +rules: + - apiGroups: + - observability.oracle.com + resources: + - databaseobservers + verbs: + - get + - list + - watch + - apiGroups: + - observability.oracle.com + resources: + - databaseobservers/status + verbs: + - get \ No newline at end of file diff --git a/config/rbac/dataguardbroker_editor_role.yaml b/config/rbac/dataguardbroker_editor_role.yaml new file mode 100644 index 00000000..d9ad534f --- /dev/null +++ b/config/rbac/dataguardbroker_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit dataguardbrokers. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dataguardbroker-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - dataguardbrokers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - dataguardbrokers/status + verbs: + - get diff --git a/config/rbac/dataguardbroker_viewer_role.yaml b/config/rbac/dataguardbroker_viewer_role.yaml new file mode 100644 index 00000000..4fe41628 --- /dev/null +++ b/config/rbac/dataguardbroker_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view dataguardbrokers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dataguardbroker-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - dataguardbrokers + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - dataguardbrokers/status + verbs: + - get diff --git a/config/rbac/dbcssystem_editor_role.yaml b/config/rbac/dbcssystem_editor_role.yaml new file mode 100644 index 00000000..934eea97 --- /dev/null +++ b/config/rbac/dbcssystem_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit dbcssystems. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dbcssystem-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - dbcssystems + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - dbcssystems/status + verbs: + - get diff --git a/config/rbac/dbcssystem_viewer_role.yaml b/config/rbac/dbcssystem_viewer_role.yaml new file mode 100644 index 00000000..8153d112 --- /dev/null +++ b/config/rbac/dbcssystem_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view dbcssystems. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dbcssystem-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - dbcssystems + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - dbcssystems/status + verbs: + - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 6325cf8c..7a20231c 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # resources: diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index 083cd887..68f2270f 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index 01f5d630..180bd383 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: rbac.authorization.k8s.io/v1 diff --git a/config/rbac/lrest_editor_role.yaml b/config/rbac/lrest_editor_role.yaml new file mode 100644 index 00000000..7f5b2f01 --- /dev/null +++ b/config/rbac/lrest_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit lrests. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrest-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrests + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - lrests/status + verbs: + - get diff --git a/config/rbac/lrest_viewer_role.yaml b/config/rbac/lrest_viewer_role.yaml new file mode 100644 index 00000000..d74bc977 --- /dev/null +++ b/config/rbac/lrest_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view lrests. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrest-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrests + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - lrests/status + verbs: + - get diff --git a/config/rbac/lrpdb_editor_role.yaml b/config/rbac/lrpdb_editor_role.yaml new file mode 100644 index 00000000..20ae714a --- /dev/null +++ b/config/rbac/lrpdb_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit lrpdbs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrpdb-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrpdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - lrpdbs/status + verbs: + - get diff --git a/config/rbac/lrpdb_viewer_role.yaml b/config/rbac/lrpdb_viewer_role.yaml new file mode 100644 index 00000000..95bcaab5 --- /dev/null +++ b/config/rbac/lrpdb_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view lrpdbs. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrpdb-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrpdbs + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - lrpdbs/status + verbs: + - get diff --git a/config/rbac/oraclerestdataservice_editor_role.yaml b/config/rbac/oraclerestdataservice_editor_role.yaml new file mode 100644 index 00000000..bf2b4d02 --- /dev/null +++ b/config/rbac/oraclerestdataservice_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit oraclerestdataservices. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: oraclerestdataservice-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - oraclerestdataservices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - oraclerestdataservices/status + verbs: + - get diff --git a/config/rbac/oraclerestdataservice_viewer_role.yaml b/config/rbac/oraclerestdataservice_viewer_role.yaml new file mode 100644 index 00000000..a0a39cfd --- /dev/null +++ b/config/rbac/oraclerestdataservice_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view oraclerestdataservices. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: oraclerestdataservice-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - oraclerestdataservices + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - oraclerestdataservices/status + verbs: + - get diff --git a/config/rbac/ordssrvs_editor_role.yaml b/config/rbac/ordssrvs_editor_role.yaml new file mode 100644 index 00000000..bc4170f6 --- /dev/null +++ b/config/rbac/ordssrvs_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ordssrvs. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ordssrvs-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - ordssrvs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - ordssrvs/status + verbs: + - get diff --git a/config/rbac/ordssrvs_viewer_role.yaml b/config/rbac/ordssrvs_viewer_role.yaml new file mode 100644 index 00000000..8880c17d --- /dev/null +++ b/config/rbac/ordssrvs_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ordssrvs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ordssrvs-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - ordssrvs + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - ordssrvs/status + verbs: + - get diff --git a/config/rbac/pdb_editor_role.yaml b/config/rbac/pdb_editor_role.yaml new file mode 100644 index 00000000..7d668e4a --- /dev/null +++ b/config/rbac/pdb_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit pdbs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pdb-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - pdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - pdbs/status + verbs: + - get diff --git a/config/rbac/pdb_viewer_role.yaml b/config/rbac/pdb_viewer_role.yaml new file mode 100644 index 00000000..5fcf68c9 --- /dev/null +++ b/config/rbac/pdb_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view pdbs. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pdb-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - pdbs + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - pdbs/status + verbs: + - get diff --git a/config/rbac/provshard_editor_role.yaml b/config/rbac/provshard_editor_role.yaml index 473a41c0..1df44f2c 100644 --- a/config/rbac/provshard_editor_role.yaml +++ b/config/rbac/provshard_editor_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/rbac/provshard_viewer_role.yaml b/config/rbac/provshard_viewer_role.yaml index 36b5bb4b..a4ef06ed 100644 --- a/config/rbac/provshard_viewer_role.yaml +++ b/config/rbac/provshard_viewer_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 6d763212..3a12386c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,16 +1,24 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: - "" resources: - configmaps + - containers + - deployments + - events + - namespaces + - persistentvolumeclaims + - pods + - pods/exec + - pods/log + - replicasets - secrets + - services verbs: - create - delete @@ -22,21 +30,29 @@ rules: - apiGroups: - "" resources: - - events - - nodes - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - services + - configmaps/status + - daemonsets/status + - deployments/status + - services/status + - statefulsets/status verbs: - - create - - delete - get - - list - patch - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list - watch +- apiGroups: + - "" + resources: + - secrets/status + verbs: + - get - apiGroups: - '''''' resources: @@ -52,6 +68,18 @@ rules: - apiGroups: - apps resources: + - configmaps + verbs: + - get + - list +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - pods + - replicasets + - services - statefulsets verbs: - create @@ -71,18 +99,21 @@ rules: - list - update - apiGroups: - - "" + - database.oracle.com resources: - - configmaps + - autonomouscontainerdatabases + - autonomousdatabases + - cdbs + - dataguardbrokers + - dbcssystems - events - - namespaces - - nodes - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - secrets - - services + - lrests + - lrpdbs + - oraclerestdataservices + - ordssrvs + - pdbs + - shardingdatabases + - singleinstancedatabases verbs: - create - delete @@ -92,21 +123,35 @@ rules: - update - watch - apiGroups: - - "" + - database.oracle.com resources: - - pods/exec + - autonomouscontainerdatabases/status + - autonomousdatabasebackups/status + - autonomousdatabaserestores/status + - 
cdbs/status + - dataguardbrokers/status + - dbcssystems/status + - lrests/status + - lrpdbs/status + - oraclerestdataservices/status + - ordssrvs/status + - pdbs/status + - shardingdatabases/status + - singleinstancedatabases/status verbs: - - create + - get + - patch + - update - apiGroups: - database.oracle.com resources: - - autonomousdatabases + - autonomousdatabasebackups + - autonomousdatabaserestores verbs: - create - delete - get - list - - patch - update - watch - apiGroups: @@ -119,18 +164,20 @@ rules: - apiGroups: - database.oracle.com resources: - - shardingdatabases + - cdbs/finalizers + - dataguardbrokers/finalizers + - lrests/finalizers + - oraclerestdataservices/finalizers + - ordssrvs/finalizers + - singleinstancedatabases/finalizers verbs: - - create - - delete - - get - - list - - patch - update - - watch - apiGroups: - database.oracle.com resources: + - dbcssystems/finalizers + - lrpdbs/finalizers + - pdbs/finalizers - shardingdatabases/finalizers verbs: - create @@ -139,17 +186,21 @@ rules: - patch - update - apiGroups: - - database.oracle.com + - monitoring.coreos.com resources: - - shardingdatabases/status + - servicemonitors verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - database.oracle.com + - observability.oracle.com resources: - - singleinstancedatabases + - databaseobservers verbs: - create - delete @@ -159,16 +210,24 @@ rules: - update - watch - apiGroups: - - database.oracle.com + - observability.oracle.com resources: - - singleinstancedatabases/finalizers + - databaseobservers/finalizers verbs: - update - apiGroups: - - database.oracle.com + - observability.oracle.com resources: - - singleinstancedatabases/status + - databaseobservers/status verbs: - get - patch - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 1ec61dc5..f2ccb566 100644 --- 
a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,9 +1,9 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: name: oracle-database-operator-manager-rolebinding roleRef: diff --git a/config/rbac/shardingdatabase_editor_role.yaml b/config/rbac/shardingdatabase_editor_role.yaml index f9660cb7..efe6ad75 100644 --- a/config/rbac/shardingdatabase_editor_role.yaml +++ b/config/rbac/shardingdatabase_editor_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/rbac/shardingdatabase_viewer_role.yaml b/config/rbac/shardingdatabase_viewer_role.yaml index 717c61a4..08b8ca26 100644 --- a/config/rbac/shardingdatabase_viewer_role.yaml +++ b/config/rbac/shardingdatabase_viewer_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/rbac/singleinstancedatabase_editor_role.yaml b/config/rbac/singleinstancedatabase_editor_role.yaml index 90a19c43..918ef991 100644 --- a/config/rbac/singleinstancedatabase_editor_role.yaml +++ b/config/rbac/singleinstancedatabase_editor_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# diff --git a/config/rbac/singleinstancedatabase_viewer_role.yaml b/config/rbac/singleinstancedatabase_viewer_role.yaml index 84bea03d..c1f0f469 100644 --- a/config/rbac/singleinstancedatabase_viewer_role.yaml +++ b/config/rbac/singleinstancedatabase_viewer_role.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # diff --git a/config/samples/acd/autonomouscontainerdatabase_bind.yaml b/config/samples/acd/autonomouscontainerdatabase_bind.yaml new file mode 100644 index 00000000..3d28ba4d --- /dev/null +++ b/config/samples/acd/autonomouscontainerdatabase_bind.yaml @@ -0,0 +1,14 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousContainerDatabase +metadata: + name: autonomouscontainerdatabase-sample +spec: + autonomousContainerDatabaseOCID: ocid1.autonomouscontainerdatabase... + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/acd/autonomouscontainerdatabase_change_displayname.yaml b/config/samples/acd/autonomouscontainerdatabase_change_displayname.yaml new file mode 100644 index 00000000..dd75250d --- /dev/null +++ b/config/samples/acd/autonomouscontainerdatabase_change_displayname.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousContainerDatabase +metadata: + name: autonomouscontainerdatabase-sample +spec: + # Update compartmentOCID with your compartment OCID. + compartmentOCID: ocid1.compartment... OR ocid1.tenancy... + displayName: newACD + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/acd/autonomouscontainerdatabase_create.yaml b/config/samples/acd/autonomouscontainerdatabase_create.yaml new file mode 100644 index 00000000..5f42a136 --- /dev/null +++ b/config/samples/acd/autonomouscontainerdatabase_create.yaml @@ -0,0 +1,20 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousContainerDatabase +metadata: + name: autonomouscontainerdatabase-sample +spec: + # Update compartmentOCID with your compartment OCID. + compartmentOCID: ocid1.compartment... OR ocid1.tenancy... + autonomousExadataVMClusterOCID: ocid1.autonomousexainfrastructure... + displayName: newACD + # # An optional field for Database Patch model preference. Should be either RELEASE_UPDATES or RELEASE_UPDATE_REVISIONS + # patchModel: RELEASE_UPDATES + + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
+ ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/acd/autonomouscontainerdatabase_delete_resource.yaml b/config/samples/acd/autonomouscontainerdatabase_delete_resource.yaml new file mode 100644 index 00000000..5be06b5a --- /dev/null +++ b/config/samples/acd/autonomouscontainerdatabase_delete_resource.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousContainerDatabase +metadata: + name: autonomouscontainerdatabase-sample +spec: + autonomousContainerDatabaseOCID: ocid1.autonomouscontainerdatabase... + # Delete this resource to terminate database after the changes applied + hardLink: true + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/acd/autonomouscontainerdatabase_restart_terminate.yaml b/config/samples/acd/autonomouscontainerdatabase_restart_terminate.yaml new file mode 100644 index 00000000..0e884f6e --- /dev/null +++ b/config/samples/acd/autonomouscontainerdatabase_restart_terminate.yaml @@ -0,0 +1,17 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousContainerDatabase +metadata: + name: autonomouscontainerdatabase-sample +spec: + autonomousContainerDatabaseOCID: ocid1.autonomouscontainerdatabase... + # Change the action to "TERMINATE" to terminate the database + action: RESTART + + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
+ ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/adb/autonomousdatabase_backup.yaml b/config/samples/adb/autonomousdatabase_backup.yaml new file mode 100644 index 00000000..0099a347 --- /dev/null +++ b/config/samples/adb/autonomousdatabase_backup.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabaseBackup +metadata: + name: autonomousdatabasebackup-sample +spec: + # Before you can create on-demand backups, you must have an Object Storage bucket and your database must be configured to connect to it. This is a one-time operation. + # See https://docs.oracle.com/en-us/iaas/Content/Database/Tasks/adbbackingup.htm#creatingbucket + target: + k8sADB: + name: autonomousdatabase-sample + # # Uncomment the below block if you use ADB OCID as the input of the target ADB + # ociADB: + # ocid: ocid1.autonomousdatabase... + displayName: autonomousdatabasebackup-sample + isLongTermBackup: true + retentionPeriodInDays: 90 # minimum retention period is 90 days + + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/autonomousdatabase_bind.yaml b/config/samples/adb/autonomousdatabase_bind.yaml similarity index 80% rename from config/samples/autonomousdatabase_bind.yaml rename to config/samples/adb/autonomousdatabase_bind.yaml index d52b9c18..702b8f03 100644 --- a/config/samples/autonomousdatabase_bind.yaml +++ b/config/samples/adb/autonomousdatabase_bind.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: database.oracle.com/v1alpha1 @@ -7,8 +7,9 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Sync details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. ociConfig: configMapName: oci-cred diff --git a/config/samples/autonomousdatabase_create.yaml b/config/samples/adb/autonomousdatabase_clone.yaml similarity index 50% rename from config/samples/autonomousdatabase_create.yaml rename to config/samples/adb/autonomousdatabase_clone.yaml index fb58b656..559d7185 100644 --- a/config/samples/autonomousdatabase_create.yaml +++ b/config/samples/adb/autonomousdatabase_clone.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, 2024, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: database.oracle.com/v1alpha1 @@ -7,20 +7,29 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Clone details: + id: ocid1.autonomousdatabase... + clone: # Update compartmentOCID with your compartment OCID. - compartmentOCID: ocid1.compartment... + compartmentId: ocid1.compartment... OR ocid1.tenancy... # The dbName must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. - dbName: NewADB - displayName: NewADB + dbName: ClonedADB + displayName: ClonedADB cpuCoreCount: 1 adminPassword: - # The Name of the K8s secret where you want to hold the password of the ADMIN account. 
Comment out k8sSecretName and uncomment ociSecretOCID if you pass the admin password using OCI Secret. - k8sSecretName: admin-password - # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . - # ociSecretOCID: ocid1.vaultsecret... + # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. + k8sSecret: + # The Name of the K8s secret where you want to hold the password of the ADMIN account. + name: admin-password + # ociSecret: + # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . + # id: ocid1.vaultsecret... dataStorageSizeInTBs: 1 + dbWorkload: OLTP + cloneType: METADATA # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. ociConfig: configMapName: oci-cred + # Comment out secretName if using OKE workload identity secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/adb/autonomousdatabase_create.yaml b/config/samples/adb/autonomousdatabase_create.yaml new file mode 100644 index 00000000..d633cb84 --- /dev/null +++ b/config/samples/adb/autonomousdatabase_create.yaml @@ -0,0 +1,66 @@ +# +# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabase +metadata: + name: autonomousdatabase-sample +spec: + action: Create + details: + # Update compartmentOCID with your compartment OCID. + compartmentId: ocid1.compartment... OR ocid1.tenancy... + # The dbName must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. 
+ dbName: NewADB + displayName: NewADB + cpuCoreCount: 1 + adminPassword: + # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. + k8sSecret: + # The Name of the K8s secret where you want to hold the password of the ADMIN account. + name: admin-password + # ociSecret: + # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . + # ocid: ocid1.vaultsecret... + dataStorageSizeInTBs: 1 + + # networkAccess: + # # Uncomment this block to configure the network access type with the PUBLIC option, which allows secure access from everywhere. + # accessType: PUBLIC + + # # Uncomment this block to configure the network access type with the RESTRICTED option. + # # This option lets you restrict access by defining access control rules in an Access Control List (ACL). + # # By specifying an ACL, the database will be accessible from a whitelisted set of IP addresses, CIDR (Classless Inter-Domain Routing) blocks, or VCNs. + # # Use a semicolon (;) as a deliminator between the VCN-specific subnets or IPs. + # accessType: RESTRICTED + # accessControlList: + # - 1.1.1.1 + # - 1.1.0.0/16 + # - ocid1.vcn... + # - ocid1.vcn...;1.1.1.1 + # - ocid1.vcn...;1.1.0.0/16 + # isMTLSConnectionRequired: true + + # # Uncomment this block to configure the network access type with the PRIVATE option. + # # This option assigns a private endpoint, private IP, and hostname to your database. + # # Specifying this option allows traffic only from the VCN you specify. + # # This allows you to define security rules, ingress/egress, at the Network Security Group (NSG) level and to control traffic to your Autonomous Database. + # accessType: PRIVATE + # privateEndpoint: + # subnetOCID: ocid1.subnet... + # nsgOCIDs: + # - ocid1.networksecuritygroup... 
+ # isMTLSConnectionRequired: true + + # # Uncomment this block to configure the network access of a dedicated Autonomous Database (ADB-D) with an access control list. + # isAccessControlEnabled: true + # accessControlList: + # - 1.1.1.1 + # - 1.1.0.0/16 + + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + # Comment out secretName if using OKE workload identity + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/autonomousdatabase_delete_resource.yaml b/config/samples/adb/autonomousdatabase_delete_resource.yaml similarity index 83% rename from config/samples/autonomousdatabase_delete_resource.yaml rename to config/samples/adb/autonomousdatabase_delete_resource.yaml index e075787e..bae1f605 100644 --- a/config/samples/autonomousdatabase_delete_resource.yaml +++ b/config/samples/adb/autonomousdatabase_delete_resource.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: database.oracle.com/v1alpha1 @@ -8,7 +8,7 @@ metadata: name: autonomousdatabase-sample spec: details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... # Delete this resource to terminate database after the changes applied hardLink: true # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
diff --git a/config/samples/autonomousdatabase_rename.yaml b/config/samples/adb/autonomousdatabase_rename.yaml similarity index 84% rename from config/samples/autonomousdatabase_rename.yaml rename to config/samples/adb/autonomousdatabase_rename.yaml index 8aa2ae8b..22dbcc0f 100644 --- a/config/samples/autonomousdatabase_rename.yaml +++ b/config/samples/adb/autonomousdatabase_rename.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: database.oracle.com/v1alpha1 @@ -7,8 +7,9 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... # The database name dbName: RenamedADB # The user-friendly name for the Autonomous Database diff --git a/config/samples/adb/autonomousdatabase_restore.yaml b/config/samples/adb/autonomousdatabase_restore.yaml new file mode 100644 index 00000000..3db8a1b6 --- /dev/null +++ b/config/samples/adb/autonomousdatabase_restore.yaml @@ -0,0 +1,31 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabaseRestore +metadata: + name: autonomousdatabaserestore-sample +spec: + # Restore the database either from a backup or using point-in-time restore + # The name of your AutonomousDatabaseBackup resource + target: + k8sADB: + name: autonomousdatabase-sample + # # Uncomment the below block if you use ADB OCID as the input of the target ADB + # ociADB: + # ocid: ocid1.autonomousdatabase... 
+ source: + k8sADBBackup: + name: autonomousdatabasebackup-sample + # # Uncomment the following field to perform point-in-time restore + # pointInTime: + # # The timestamp must follow this format: YYYY-MM-DD HH:MM:SS GMT + # timestamp: 2022-12-23 11:03:13 UTC + + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + \ No newline at end of file diff --git a/config/samples/autonomousdatabase_scale.yaml b/config/samples/adb/autonomousdatabase_scale.yaml similarity index 85% rename from config/samples/autonomousdatabase_scale.yaml rename to config/samples/adb/autonomousdatabase_scale.yaml index 4a7c85e9..ea53e94d 100644 --- a/config/samples/autonomousdatabase_scale.yaml +++ b/config/samples/adb/autonomousdatabase_scale.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: database.oracle.com/v1alpha1 @@ -7,8 +7,9 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... 
# Your database's OPCU core count cpuCoreCount: 2 # Your database's storage size in TB diff --git a/config/samples/autonomousdatabase_stop_start_terminate.yaml b/config/samples/adb/autonomousdatabase_stop_start_terminate.yaml similarity index 68% rename from config/samples/autonomousdatabase_stop_start_terminate.yaml rename to config/samples/adb/autonomousdatabase_stop_start_terminate.yaml index 83d831cd..4a191dd6 100644 --- a/config/samples/autonomousdatabase_stop_start_terminate.yaml +++ b/config/samples/adb/autonomousdatabase_stop_start_terminate.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: database.oracle.com/v1alpha1 @@ -7,10 +7,10 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + + action: Stop # Use the value "Start" to start the database details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - # Change the lifecycleState to "AVAILABLE" to start the database - lifecycleState: STOPPED + id: ocid1.autonomousdatabase... # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. ociConfig: configMapName: oci-cred diff --git a/config/samples/adb/autonomousdatabase_update_admin_password.yaml b/config/samples/adb/autonomousdatabase_update_admin_password.yaml new file mode 100644 index 00000000..be7aca69 --- /dev/null +++ b/config/samples/adb/autonomousdatabase_update_admin_password.yaml @@ -0,0 +1,24 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabase +metadata: + name: autonomousdatabase-sample +spec: + action: Update + details: + id: ocid1.autonomousdatabase... 
+ adminPassword: + # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. + k8sSecret: + # The Name of the K8s secret where you want to hold the password of the ADMIN account. + name: new-admin-password + # ociSecret: + # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . + # id: ocid1.vaultsecret... + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/adb/autonomousdatabase_update_mtls.yaml b/config/samples/adb/autonomousdatabase_update_mtls.yaml new file mode 100644 index 00000000..25eda529 --- /dev/null +++ b/config/samples/adb/autonomousdatabase_update_mtls.yaml @@ -0,0 +1,19 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabase +metadata: + name: autonomousdatabase-sample +spec: + action: Update + details: + id: ocid1.autonomousdatabase... + # Set the parameter to false to allow both TLS and mutual TLS (mTLS) authentication, or true to require mTLS connections and disallow TLS connections. + isMTLSConnectionRequired: true + + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
+ ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/adb/autonomousdatabase_update_network_access.yaml b/config/samples/adb/autonomousdatabase_update_network_access.yaml new file mode 100644 index 00000000..7dd3fa0c --- /dev/null +++ b/config/samples/adb/autonomousdatabase_update_network_access.yaml @@ -0,0 +1,46 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabase +metadata: + name: autonomousdatabase-sample +spec: + action: Update + details: + id: ocid1.autonomousdatabase... + # # Allow secure access from everywhere. Uncomment one of the following fields depending on your network access configuration. + # accessControlList: + # - + # privateEndpoint: "" + + # # Uncomment this block to configure the network access type with the RESTRICTED option. + # # This option lets you restrict access by defining access control rules in an Access Control List (ACL). + # # By specifying an ACL, the database will be accessible from a whitelisted set of IP addresses, CIDR (Classless Inter-Domain Routing) blocks, or VCNs. + # # Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs. + # accessControlList: + # - 1.1.1.1 + # - 1.1.0.0/16 + # - ocid1.vcn... + # - ocid1.vcn...;1.1.1.1 + # - ocid1.vcn...;1.1.0.0/16 + + # # Uncomment this block to configure the network access type with the PRIVATE option. + # # This option assigns a private endpoint, private IP, and hostname to your database. + # # Specifying this option allows traffic only from the VCN you specify. + # # This allows you to define security rules, ingress/egress, at the Network Security Group (NSG) level and to control traffic to your Autonomous Database. + # privateEndpoint: + # subnetOCID: ocid1.subnet... 
+ # nsgOCIDs: # Optional + # - ocid1.networksecuritygroup... + + # # Uncomment this block to configure the network access of a dedicated Autonomous Database (ADB-D) with an access control list. + # accessControlList: + # - 1.1.1.1 + # - 1.1.0.0/16 + + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/adb/autonomousdatabase_wallet.yaml b/config/samples/adb/autonomousdatabase_wallet.yaml new file mode 100644 index 00000000..84136647 --- /dev/null +++ b/config/samples/adb/autonomousdatabase_wallet.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabase +metadata: + name: autonomousdatabase-sample +spec: + action: Update + details: + id: ocid1.autonomousdatabase... + wallet: + # Insert a name of the secret where you want the wallet to be stored. The default name is -instance-wallet. + name: instance-wallet + password: + # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. + k8sSecret: + # The Name of the K8s secret where you want to hold the password of the ADMIN account. + name: instance-wallet-password + # ociSecret: + # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . + # id: ocid1.vaultsecret... + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
+ ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/autonomousdatabase.yaml b/config/samples/autonomousdatabase.yaml deleted file mode 100644 index c1dfc078..00000000 --- a/config/samples/autonomousdatabase.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: AutonomousDatabase -metadata: - name: autonomousdatabase-sample -spec: - # Add fields here - foo: bar diff --git a/config/samples/autonomousdatabase_change_admin_password.yaml b/config/samples/autonomousdatabase_change_admin_password.yaml deleted file mode 100644 index 47ad1d65..00000000 --- a/config/samples/autonomousdatabase_change_admin_password.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: AutonomousDatabase -metadata: - name: autonomousdatabase-sample -spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - adminPassword: - # The Name of the secret where you want to hold the password of the ADMIN account. Comment out k8sSecretName and uncomment ociSecretOCID if you pass the admin password using OCI Secret. - k8sSecretName: new-admin-password - # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . - # ociSecretOCID: ocid1.vaultsecret... - # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
- ociConfig: - configMapName: oci-cred - secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/autonomousdatabase_wallet.yaml b/config/samples/autonomousdatabase_wallet.yaml deleted file mode 100644 index 34953403..00000000 --- a/config/samples/autonomousdatabase_wallet.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: AutonomousDatabase -metadata: - name: autonomousdatabase-sample -spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - wallet: - # Insert a name of the secret where you want the wallet to be stored. The default name is -instance-wallet. - name: instance-wallet - password: - # The Name of the secret where you want to hold the wallet password. Comment out k8sSecretName and uncomment ociSecretOCID if you pass the wallet password using OCI Secret. - k8sSecretName: instance-wallet-password - # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . - # ociSecretOCID: ocid1.vaultsecret... - # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
- ociConfig: - configMapName: oci-cred - secretName: oci-privatekey diff --git a/config/samples/dbcs/database_v1alpha1_dbcssystem.yaml b/config/samples/dbcs/database_v1alpha1_dbcssystem.yaml new file mode 100644 index 00000000..0ca38ddf --- /dev/null +++ b/config/samples/dbcs/database_v1alpha1_dbcssystem.yaml @@ -0,0 +1,7 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: DbcsSystem +metadata: + name: dbcssystem-sample +spec: + # Add fields here + foo: bar diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 4b419923..1a032832 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,12 +1,51 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # ## Append samples you want in your CSV to this file as resources ## resources: - - sharding_v1alpha1_provshard.yaml - - autonomousdatabase.yaml - - singleinstancedatabase.yaml - - shardingdatabase.yaml + - multitenant/pdb_plug.yaml + - multitenant/cdb_secret.yaml + - multitenant/pdb_secret.yaml + - multitenant/pdb_clone.yaml + - multitenant/cdb.yaml + - sidb/singleinstancedatabase_patch.yaml + - sidb/oraclerestdataservice_apex.yaml + - sidb/singleinstancedatabase_express.yaml + - sidb/singleinstancedatabase_secrets.yaml + - sidb/singleinstancedatabase_clone.yaml + - sidb/singleinstancedatabase_prebuiltdb.yaml + - sidb/dataguardbroker.yaml + - sidb/oraclerestdataservice_secrets.yaml + - sidb/singleinstancedatabase_free.yaml + - sidb/singleinstancedatabase_standby.yaml + - sidb/openshift_rbac.yaml + - sharding/sharding_v1alpha1_provshard_clonespec1.yaml + - sharding/shardingdatabase.yaml + - sharding/sharding_v1alpha1_provshard_clonespec.yaml + - observability/databaseobserver_vault.yaml + - observability/databaseobserver_minimal.yaml + - adb/autonomousdatabase_bind.yaml + - 
adb/autonomousdatabase_backup.yaml + - adb/autonomousdatabase_restore.yaml + - acd/autonomouscontainerdatabase_create.yaml + - sidb/singleinstancedatabase.yaml + - sharding/shardingdatabase.yaml + - sharding/sharding_v1alpha1_provshard.yaml + - dbcs/database_v1alpha1_dbcssystem.yaml + - database_v1alpha1_dataguardbroker.yaml + - database_v1alpha1_shardingdatabase.yaml + - observability/v1alpha1/databaseobserver.yaml + - observability/v1/databaseobserver.yaml + - observability/v4/databaseobserver.yaml + - acd/autonomouscontainerdatabase_restart_terminate.yaml + - database_v4_shardingdatabase.yaml + - database_v4_dbcssystem.yaml +- database_v4_lrest.yaml +- database_v4_lrpdb.yaml +- database_v4_ordssrvs.yaml +- database_v4_singleinstancedatabase.yaml +- database_v4_dataguardbroker.yaml +- database_v4_oraclerestdataservice.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/multitenant/cdb.yaml b/config/samples/multitenant/cdb.yaml new file mode 100644 index 00000000..e3513d12 --- /dev/null +++ b/config/samples/multitenant/cdb.yaml @@ -0,0 +1,43 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "devcdb" + dbServer: "172.17.0.4" + dbPort: 1521 + replicas: 1 + ordsImage: "" + ordsImagePullPolicy: "Always" + # Uncomment Below Secret Format for accessing ords image from private docker registry + # ordsImagePullSecret: "" + serviceName: "devdb.example.com" + sysAdminPwd: + secret: + secretName: "cdb1-secret" + key: "sysadmin_pwd" + ordsPwd: + secret: + secretName: "cdb1-secret" + key: "ords_pwd" + cdbAdminUser: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_user" + cdbAdminPwd: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_pwd" + webServerUser: + secret: + secretName: "cdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "cdb1-secret" + key: "webserver_pwd" diff --git a/config/samples/multitenant/cdb_secret.yaml b/config/samples/multitenant/cdb_secret.yaml new file mode 100644 index 00000000..e270100d --- /dev/null +++ b/config/samples/multitenant/cdb_secret.yaml @@ -0,0 +1,17 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: cdb1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + ords_pwd: "[base64 encode value]" + sysadmin_pwd: "[base64 encode value]" + cdbadmin_user: "[base64 encode value]" + cdbadmin_pwd: "[base64 encode value]" + webserver_user: "[base64 encode values]" + webserver_pwd: "[base64 encode values]" diff --git a/config/samples/multitenant/pdb_clone.yaml b/config/samples/multitenant/pdb_clone.yaml new file mode 100644 index 00000000..f36e904d --- /dev/null +++ b/config/samples/multitenant/pdb_clone.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1-clone + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbName: "devcdb" + pdbName: "pdbdevclone" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Clone" diff --git a/config/samples/multitenant/pdb_create.yaml b/config/samples/multitenant/pdb_create.yaml new file mode 100644 index 00000000..2be31acf --- /dev/null +++ b/config/samples/multitenant/pdb_create.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbName: "devcdb" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Create" diff --git a/config/samples/multitenant/pdb_delete.yaml b/config/samples/multitenant/pdb_delete.yaml new file mode 100644 index 00000000..6c5299c0 --- /dev/null +++ b/config/samples/multitenant/pdb_delete.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" \ No newline at end of file diff --git a/config/samples/multitenant/pdb_modify.yaml b/config/samples/multitenant/pdb_modify.yaml new file mode 100644 index 00000000..feac2dbf --- /dev/null +++ b/config/samples/multitenant/pdb_modify.yaml @@ -0,0 +1,22 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbName: "democdb" + pdbName: "demotest" + action: "Modify" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + + # To Open an existing PDB, uncomment the below lines and comment the two lines above + #pdbState: "OPEN" + #modifyOption: "READ WRITE" \ No newline at end of file diff --git a/config/samples/multitenant/pdb_plug.yaml b/config/samples/multitenant/pdb_plug.yaml new file mode 100644 index 00000000..b48c4ffc --- /dev/null +++ b/config/samples/multitenant/pdb_plug.yaml @@ -0,0 +1,19 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + pdbName: "pdbdev" + xmlFileName: "/opt/oracle/oradata/pdbdev.xml" + sourceFileNameConversions: "NONE" + fileNameConversions: "NONE" + copyAction: "NOCOPY" + action: "Plug" \ No newline at end of file diff --git a/config/samples/multitenant/pdb_secret.yaml b/config/samples/multitenant/pdb_secret.yaml new file mode 100644 index 00000000..8a3202d9 --- /dev/null +++ b/config/samples/multitenant/pdb_secret.yaml @@ -0,0 +1,13 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: pdb1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + sysadmin_user: "[ base64 encode value]" + sysadmin_pwd: "[ base64 encode value]" diff --git a/config/samples/multitenant/pdb_unplug.yaml b/config/samples/multitenant/pdb_unplug.yaml new file mode 100644 index 00000000..21d7b187 --- /dev/null +++ b/config/samples/multitenant/pdb_unplug.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + pdbName: "pdbdev" + xmlFileName: "/opt/oracle/oradata/demotest_pdb.xml" + action: "Unplug" + tdeExport: true + tdeSecret: + secret: + secretName: "pdb1-secret" + key: "tde_secret" + tdeKeystorePath: "/opt/oracle/test" + tdePassword: + secret: + secretName: "pdb1-secret" + key: "tde_pwd" + getScript: true \ No newline at end of file diff --git a/config/samples/observability/databaseobserver.yaml b/config/samples/observability/databaseobserver.yaml new file mode 100644 index 00000000..b3140549 --- /dev/null +++ b/config/samples/observability/databaseobserver.yaml @@ -0,0 +1,44 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + exporter: + image: "container-registry.oracle.com/database/observability-exporter:latest" + configuration: + configmap: + key: "config.toml" + configmapName: "devcm-oradevdb-config" + + service: + port: 9161 + + prometheus: + port: metrics + labels: + app: app-sample-label + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/databaseobserver_custom_config.yaml b/config/samples/observability/databaseobserver_custom_config.yaml new file mode 100644 index 00000000..1e9fff47 --- /dev/null +++ b/config/samples/observability/databaseobserver_custom_config.yaml @@ -0,0 +1,28 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample + namespace: observer +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: 
"password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + exporter: + configuration: + configmap: + key: "config.toml" + configmapName: "devcm-oradevdb-config" \ No newline at end of file diff --git a/config/samples/observability/databaseobserver_minimal.yaml b/config/samples/observability/databaseobserver_minimal.yaml new file mode 100644 index 00000000..2eeaf3ab --- /dev/null +++ b/config/samples/observability/databaseobserver_minimal.yaml @@ -0,0 +1,22 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample + namespace: observer +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallets \ No newline at end of file diff --git a/config/samples/observability/databaseobserver_vault.yaml b/config/samples/observability/databaseobserver_vault.yaml new file mode 100644 index 00000000..fa2e09d4 --- /dev/null +++ b/config/samples/observability/databaseobserver_vault.yaml @@ -0,0 +1,25 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + vaultSecretName: sample_secret + vaultOCID: ocid1.vault.oc1.. 
+ + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/observability/sample-dashboard.json b/config/samples/observability/sample-dashboard.json new file mode 100644 index 00000000..5b05b05c --- /dev/null +++ b/config/samples/observability/sample-dashboard.json @@ -0,0 +1,1414 @@ +{ + "__inputs": [ + { + "name": "Prometheus", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.5.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 5, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 3, + "panels": [], + "title": "Oracle Database Details", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 1, + "text": "DEAD" + }, + "1": { + "index": 0, + "text": "ALIVE" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + 
}, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 10, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "oracledb_up", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Database Status", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 4, + "y": 1 + }, + "id": 5, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "oracledb_obaas_db_system_value{name=\"sga_max_size\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "SGA Max Size", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + 
] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 7, + "y": 1 + }, + "id": 6, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "oracledb_obaas_db_system_value{name=\"pga_aggregate_limit\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "PGA Aggregate Limit", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 10, + "y": 1 + }, + "id": 11, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_sessions_value{status=\"ACTIVE\"}", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Active Sessions", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + 
"value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 15, + "y": 1 + }, + "id": 12, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_activity_user_commits", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "User commits", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 18, + "y": 1 + }, + "id": 13, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_activity_execute_count", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Execute count", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": 
[] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 21, + "y": 1 + }, + "id": 7, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": false + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max(oracledb_obaas_db_platform_value) by (platform_name)", + "format": "table", + "instant": true, + "legendFormat": "{{platform_name}}", + "range": false, + "refId": "A" + } + ], + "title": "Database Platform", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true + }, + "indexByName": {}, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 21, + "y": 4 + }, + "id": 2, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "oracledb_obaas_db_system_value{name=\"cpu_count\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "CPU Count", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 8, + "panels": [], + "title": "Top SQL", + "type": 
"row" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "SQL ID" + }, + "properties": [ + { + "id": "custom.width", + "value": 226 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "SQL Text (extract)" + }, + "properties": [ + { + "id": "custom.width", + "value": 1311 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 9, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "9.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max(oracledb_obaas_top_sql_elapsed) by (sql_id, sql_text)", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Top SQL by elapsed time running", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Elapsed Time", + "sql_id": "SQL ID", + "sql_text": "SQL Text (extract)" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Elapsed Time" + } + ] + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 4, + "panels": [], + "title": "System Wait Classes", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + 
"defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 19, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_wait_time_concurrency", + "interval": "$interval", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Wait time - Concurrency", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + 
"group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_wait_time_commit", + "interval": "$interval", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Wait time - Commit", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 26 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + 
"targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_wait_time_system_io", + "interval": "$interval", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Wait time - System I/O", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 17, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_wait_time_user_io", + "interval": "$interval", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Wait time - User I/O", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 34 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_wait_time_application", + "interval": "$interval", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Wait time - Application", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": 
"red", + "value": 80 + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 34 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "oracledb_wait_time_network", + "interval": "$interval", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Wait time - Network", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "auto": true, + "auto_count": 200, + "auto_min": "10s", + "current": { + "selected": false, + "text": "auto", + "value": "$__auto_interval_interval" + }, + "hide": 0, + "label": "Interval", + "name": "interval", + "options": [ + { + "selected": true, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "1m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Oracle Dashboard", + "uid": "obaas_oracle_dashboard", + "version": 
20, + "weekStart": "" +} \ No newline at end of file diff --git a/config/samples/observability/sample_config.toml b/config/samples/observability/sample_config.toml new file mode 100644 index 00000000..0989d769 --- /dev/null +++ b/config/samples/observability/sample_config.toml @@ -0,0 +1,29 @@ +[[metric]] +context = "obaas_db_system" +labels = [ "name" ] +metricsdesc = { value = "Database system resources metric" } +request = ''' +select name, value +from v$parameter +where name in ('cpu_count', 'sga_max_size', 'pga_aggregate_limit') +''' + +[[metric]] +context = "obaas_db_platform" +labels = [ "platform_name" ] +metricsdesc = { value = "Database platform" } +request = ''' +SELECT platform_name, 1 as value FROM v$database +''' + +[[metric]] +context = "obaas_top_sql" +labels = [ "sql_id", "sql_text" ] +metricsdesc = { elapsed = "SQL statement elapsed time running" } +request = ''' +select * from ( +select sql_id, elapsed_time / 1000000 as elapsed, SUBSTRB(REPLACE(sql_text,'',' '),1,55) as sql_text +from V$SQLSTATS +order by elapsed_time desc +) where ROWNUM <= 15 +''' diff --git a/config/samples/observability/v1/databaseobserver.yaml b/config/samples/observability/v1/databaseobserver.yaml new file mode 100644 index 00000000..82a7e89e --- /dev/null +++ b/config/samples/observability/v1/databaseobserver.yaml @@ -0,0 +1,81 @@ +# example +apiVersion: observability.oracle.com/v1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: [ ] + sidecarVolumes: [ ] + + exporter: + deployment: + env: + 
TNS_ADMIN: /some/custom/path + ORACLE_HOME: /some/custom/path + DB_ROLE: SYSDBA + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + labels: + environment: dev + podTemplate: + labels: + environment: dev + + service: + labels: + environment: dev + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + + log: + filename: "alert.log" + path: "/log" + + volume: + name: volume + persistentVolumeClaim: + claimName: "my-pvc" + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v1/databaseobserver_customization_fields.yaml b/config/samples/observability/v1/databaseobserver_customization_fields.yaml new file mode 100644 index 00000000..d88caec4 --- /dev/null +++ b/config/samples/observability/v1/databaseobserver_customization_fields.yaml @@ -0,0 +1,54 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallets + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: + - "--log.level=info" + commands: + - "/oracledb_exporter" + env: + TNS_ADMIN: /some/custom/path + labels: + environment: dev + podTemplate: + labels: + environment: dev + service: + ports: + - name: "metrics" + port: 9161 + targetPort: 9161 + labels: + environment: dev + + prometheus: + serviceMonitor: + endpoints: + - bearerTokenSecret: + key: '' + interval: 15s + port: metrics + labels: + release: prometheus + diff --git a/config/samples/observability/v1/databaseobserver_logs_promtail.yaml 
b/config/samples/observability/v1/databaseobserver_logs_promtail.yaml new file mode 100644 index 00000000..8130f487 --- /dev/null +++ b/config/samples/observability/v1/databaseobserver_logs_promtail.yaml @@ -0,0 +1,74 @@ +# example +apiVersion: observability.oracle.com/v1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/promtail.yaml + volumeMounts: + - name: config + mountPath: /etc/promtail + - name: log-volume + mountPath: /log + + sidecarVolumes: + - name: config + configMap: + name: promtail-sidecar-config + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + log: + filename: "alert.log" + path: "/log" + + volume: + name: log-volume + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/observability/v1alpha1/databaseobserver.yaml b/config/samples/observability/v1alpha1/databaseobserver.yaml new file mode 100644 index 00000000..24672d8b --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver.yaml @@ -0,0 +1,80 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + 
app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: [ ] + sidecarVolumes: [ ] + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + env: + TNS_ADMIN: /some/custom/path + ORACLE_HOME: /some/custom/path + labels: + environment: dev + podTemplate: + labels: + environment: dev + + service: + labels: + environment: dev + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + + log: + filename: "alert.log" + path: "/log" + + volume: + name: volume + persistentVolumeClaim: + claimName: "my-pvc" + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v1alpha1/databaseobserver_custom_config.yaml b/config/samples/observability/v1alpha1/databaseobserver_custom_config.yaml new file mode 100644 index 00000000..8e0d0623 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_custom_config.yaml @@ -0,0 +1,46 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: 
instance-wallet + + inherit_labels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + prometheus: + serviceMonitor: + labels: + release: prometheus + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" \ No newline at end of file diff --git a/config/samples/observability/v1alpha1/databaseobserver_logs_promtail.yaml b/config/samples/observability/v1alpha1/databaseobserver_logs_promtail.yaml new file mode 100644 index 00000000..28592cb0 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_logs_promtail.yaml @@ -0,0 +1,74 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/promtail.yaml + volumeMounts: + - name: config + mountPath: /etc/promtail + - name: log-volume + mountPath: /log + + sidecarVolumes: + - name: config + configMap: + name: promtail-sidecar-config + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + 
log: + filename: "alert.log" + path: "/log" + + volume: + name: log-volume + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/observability/v1alpha1/databaseobserver_minimal.yaml b/config/samples/observability/v1alpha1/databaseobserver_minimal.yaml new file mode 100644 index 00000000..74620ac7 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_minimal.yaml @@ -0,0 +1,26 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallets + + prometheus: + serviceMonitor: + labels: + release: prometheus \ No newline at end of file diff --git a/config/samples/observability/v1alpha1/databaseobserver_vault.yaml b/config/samples/observability/v1alpha1/databaseobserver_vault.yaml new file mode 100644 index 00000000..2fc3c9f0 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_vault.yaml @@ -0,0 +1,30 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + vaultSecretName: sample_secret + vaultOCID: ocid1.vault.oc1.. 
+ + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + prometheus: + serviceMonitor: + labels: + release: prometheus + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/observability/v4/databaseobserver.yaml b/config/samples/observability/v4/databaseobserver.yaml new file mode 100644 index 00000000..f7b310f7 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver.yaml @@ -0,0 +1,79 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: [ ] + sidecarVolumes: [ ] + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + env: + TNS_ADMIN: /some/custom/path + ORACLE_HOME: /some/custom/path + labels: + environment: dev + podTemplate: + labels: + environment: dev + + service: + labels: + environment: dev + + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + log: + filename: "alert.log" + path: "/log" + + volume: + name: volume + persistentVolumeClaim: + claimName: "my-pvc" + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v4/databaseobserver_custom_config.yaml 
b/config/samples/observability/v4/databaseobserver_custom_config.yaml new file mode 100644 index 00000000..dd2e3da5 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_custom_config.yaml @@ -0,0 +1,46 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inherit_labels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus \ No newline at end of file diff --git a/config/samples/observability/v4/databaseobserver_logs_promtail.yaml b/config/samples/observability/v4/databaseobserver_logs_promtail.yaml new file mode 100644 index 00000000..26a747a3 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_logs_promtail.yaml @@ -0,0 +1,76 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - 
app.kubernetes.io/version + + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/promtail.yaml + volumeMounts: + - name: config + mountPath: /etc/promtail + - name: log-volume + mountPath: /log + + sidecarVolumes: + - name: config + configMap: + name: promtail-sidecar-config + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + log: + filename: "alert.log" + path: "/log" + + volume: + name: log-volume + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v4/databaseobserver_minimal.yaml b/config/samples/observability/v4/databaseobserver_minimal.yaml new file mode 100644 index 00000000..cc14fbea --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_minimal.yaml @@ -0,0 +1,26 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallets + + prometheus: + serviceMonitor: + labels: + release: prometheus diff --git a/config/samples/observability/v4/databaseobserver_vault.yaml b/config/samples/observability/v4/databaseobserver_vault.yaml new file mode 100644 index 00000000..4f5845f6 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_vault.yaml @@ -0,0 +1,39 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + 
app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + vaultSecretName: sample_secret + vaultOCID: ocid1.vault.oc1.. + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + prometheus: + serviceMonitor: + labels: + release: prometheus + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/sharding_v1alpha1_provshard.yaml b/config/samples/sharding/sharding_v1alpha1_provshard.yaml similarity index 92% rename from config/samples/sharding_v1alpha1_provshard.yaml rename to config/samples/sharding/sharding_v1alpha1_provshard.yaml index 60f5f007..a51993e6 100644 --- a/config/samples/sharding_v1alpha1_provshard.yaml +++ b/config/samples/sharding/sharding_v1alpha1_provshard.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: sharding.oracle.com/v1alpha1 diff --git a/config/samples/sharding_v1alpha1_provshard_clonespec.yaml b/config/samples/sharding/sharding_v1alpha1_provshard_clonespec.yaml similarity index 97% rename from config/samples/sharding_v1alpha1_provshard_clonespec.yaml rename to config/samples/sharding/sharding_v1alpha1_provshard_clonespec.yaml index 414544bd..acbf5a9c 100644 --- a/config/samples/sharding_v1alpha1_provshard_clonespec.yaml +++ b/config/samples/sharding/sharding_v1alpha1_provshard_clonespec.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# apiVersion: sharding.oracle.com/v1alpha1 diff --git a/config/samples/sharding_v1alpha1_provshard_clonespec1.yaml b/config/samples/sharding/sharding_v1alpha1_provshard_clonespec1.yaml similarity index 97% rename from config/samples/sharding_v1alpha1_provshard_clonespec1.yaml rename to config/samples/sharding/sharding_v1alpha1_provshard_clonespec1.yaml index 67988b2c..5bca5b85 100644 --- a/config/samples/sharding_v1alpha1_provshard_clonespec1.yaml +++ b/config/samples/sharding/sharding_v1alpha1_provshard_clonespec1.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: sharding.oracle.com/v1alpha1 diff --git a/config/samples/sharding_v1alpha1_provshard_orig.yaml b/config/samples/sharding/sharding_v1alpha1_provshard_orig.yaml similarity index 96% rename from config/samples/sharding_v1alpha1_provshard_orig.yaml rename to config/samples/sharding/sharding_v1alpha1_provshard_orig.yaml index fc7ba66d..0300d6ce 100644 --- a/config/samples/sharding_v1alpha1_provshard_orig.yaml +++ b/config/samples/sharding/sharding_v1alpha1_provshard_orig.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: sharding.oracle.com/v1alpha1 diff --git a/config/samples/shardingdatabase.yaml b/config/samples/sharding/shardingdatabase.yaml similarity index 82% rename from config/samples/shardingdatabase.yaml rename to config/samples/sharding/shardingdatabase.yaml index 40453b49..b639800f 100644 --- a/config/samples/shardingdatabase.yaml +++ b/config/samples/sharding/shardingdatabase.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: database.oracle.com/v1alpha1 diff --git a/config/samples/sidb/dataguardbroker.yaml b/config/samples/sidb/dataguardbroker.yaml new file mode 100644 index 00000000..644d2d40 --- /dev/null +++ b/config/samples/sidb/dataguardbroker.yaml @@ -0,0 +1,33 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: DataguardBroker +metadata: + name: dataguardbroker-sample + namespace: default +spec: + + ## Primary DB ref. This is of kind SingleInstanceDatabase + primaryDatabaseRef: "sidb-sample" + + ## Standby DB pod CRD Metadata Name to add this DB to DG config + standbyDatabaseRefs: + # - standbydatabase-sample + # - standbydatabase-sample1 + + ## Type of service . Applicable on cloud enviroments only + ## if loadBalService : false , service type = "NodePort" . else "LoadBalancer" + loadBalancer: false + + ## Protection Mode for dg configuration . MaxAvailability or MaxPerformance + protectionMode: MaxAvailability + + ## Specify the database SID to switchover thereby making it the primary. + ## Switchover is not supported when fastStartFailover is true. + setAsPrimaryDatabase: "" + + ## Enable/disable Fast-Start Failover for the dataguard configuration. + fastStartFailover: false diff --git a/config/samples/sidb/openshift_rbac.yaml b/config/samples/sidb/openshift_rbac.yaml new file mode 100644 index 00000000..6dddb80d --- /dev/null +++ b/config/samples/sidb/openshift_rbac.yaml @@ -0,0 +1,94 @@ +# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- + +# Create a Security Context Constraint +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: sidb-oracle-user-scc +allowPrivilegedContainer: false +allowedCapabilities: + - SYS_NICE +runAsUser: + type: MustRunAs + uid: 54321 +seLinuxContext: + type: RunAsAny +fsGroup: + type: MustRunAs + ranges: + - min: 54321 + max: 54321 +supplementalGroups: + type: MustRunAs + ranges: + - min: 54321 + max: 54321 +--- + +# Create a Security Context Constraint +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: sidb-oracle-root-user-scc +allowPrivilegedContainer: false +allowedCapabilities: + - SYS_NICE +runAsUser: + type: MustRunAsRange + uidRangeMin: 0 + uidRangeMax: 54321 +seLinuxContext: + type: RunAsAny +fsGroup: + type: MustRunAs + ranges: + - min: 0 + max: 54321 +supplementalGroups: + type: MustRunAs + ranges: + - min: 0 + max: 54321 +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sidb-sa + namespace: sidb-ns +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: use-sidb-scc + namespace: sidb-ns +rules: + - apiGroups: + - security.openshift.io + verbs: + - use + resources: + - securitycontextconstraints + resourceNames: + - sidb-oracle-user-scc + - sidb-oracle-root-user-scc +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: use-sidb-scc + namespace: sidb-ns +subjects: + - kind: ServiceAccount + name: sidb-sa + namespace: sidb-ns +roleRef: + kind: Role + name: use-sidb-scc + apiGroup: rbac.authorization.k8s.io diff --git a/config/samples/sidb/oraclerestdataservice.yaml b/config/samples/sidb/oraclerestdataservice.yaml new file mode 100644 index 00000000..77555f47 --- /dev/null +++ b/config/samples/sidb/oraclerestdataservice.yaml @@ -0,0 +1,71 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. 
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v4 +kind: OracleRestDataService +metadata: + name: ords-sample + namespace: default +spec: + + ## Database ref. This can be of kind SingleInstanceDatabase. + databaseRef: "singleinstancedatabase-sample" + + ## Secret containing databaseRef password mapped to secretKey. + ## This secret will be deleted after ORDS Installation unless keepSecret set to true + adminPassword: + secretName: + secretKey: + keepSecret: true + + ## Secret containing ORDS_PUBLIC_USER password mapped to secretKey. secretKey defaults to oracle_pwd + ## This secret will be deleted after ORDS Installation unless keepSecret set to true + ordsPassword: + secretName: + secretKey: + keepSecret: true + + ## ORDS image details + image: + pullFrom: container-registry.oracle.com/database/ords-developer:latest + pullSecrets: + + ## Dedicated persistent storage is optional. If not specified, ORDS will use persistent storage from .spec.databaseRef + ## size is the required minimum size of the persistent volume + ## storageClass is used for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + ## volumeName is optional. Specify for binding to a specific PV and set storageClass to an empty string to disable automatic volume provisioning + # persistence: + # size: 50Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + # storageClass: "oci-bv" + # accessMode: "ReadWriteOnce" + # volumeName: "" + + ## Type of service Applicable on cloud enviroments only. + ## if loadBalService: false, service type = "NodePort" else "LoadBalancer" + loadBalancer: false + ## Service Annotations (Cloud provider specific), for configuring the service (e.g. 
private LoadBalancer service) + #serviceAnnotations: + # service.beta.kubernetes.io/oci-load-balancer-internal: "true" + + ## Set this to true to enable MongoDB API + mongoDbApi: true + + ## Deploy only on nodes having required labels. Format label_name: label_value + ## The same labels are applied to the created PVC + ## For instance if the pods need to be restricted to a particular AD + ## Leave commented if there is no such requirement + # nodeSelector: + # topology.kubernetes.io/zone: PHX-AD-1 + + ## Schemas to be ORDS Enabled in PDB of .spec.databaseRef (.spec.pdbName) + ## Schema will be created (if not exists) with password as .spec.ordsPassword + restEnableSchemas: + - schemaName: + enable: true + urlMapping: + + ## If deploying on OpenShift, change service account name to 'sidb-sa' after you run `$ oc apply -f openshift_rbac.yaml` + serviceAccountName: default diff --git a/config/samples/sidb/oraclerestdataservice_create.yaml b/config/samples/sidb/oraclerestdataservice_create.yaml new file mode 100644 index 00000000..e98ca018 --- /dev/null +++ b/config/samples/sidb/oraclerestdataservice_create.yaml @@ -0,0 +1,40 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: OracleRestDataService +metadata: + name: ords-sample + namespace: default +spec: + + ## Database ref. This can be of kind SingleInstanceDatabase. + ## Make sure the source database has been created by applying singleinstancedatabase_express.yaml + databaseRef: "xedb-sample" + + ## Secret containing databaseRef password mapped to secretKey. + adminPassword: + secretName: xedb-admin-secret + + ## Secret containing ORDS_PUBLIC_USER password mapped to secretKey. 
+ ordsPassword: + secretName: ords-secret + + ## ORDS image details + image: + pullFrom: container-registry.oracle.com/database/ords-developer:latest + + ## Set this to true to enable MongoDB API + mongoDbApi: true + + ## PDB Schemas to be ORDS Enabled. + ## Schema will be created (if not exists) with password as .spec.ordsPassword. + restEnableSchemas: + - schemaName: schema1 + enable: true + urlMapping: + - schemaName: schema2 + enable: true + urlMapping: myschema \ No newline at end of file diff --git a/config/samples/sidb/oraclerestdataservice_secrets.yaml b/config/samples/sidb/oraclerestdataservice_secrets.yaml new file mode 100644 index 00000000..aebca546 --- /dev/null +++ b/config/samples/sidb/oraclerestdataservice_secrets.yaml @@ -0,0 +1,15 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +## ORDS password secret +apiVersion: v1 +kind: Secret +metadata: + name: ords-secret + namespace: default +type: Opaque +stringData: + ## Specify your ORDS password here + oracle_pwd: diff --git a/config/samples/sidb/singleinstancedatabase.yaml b/config/samples/sidb/singleinstancedatabase.yaml new file mode 100644 index 00000000..368762f5 --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase.yaml @@ -0,0 +1,161 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: sidb-sample + namespace: default +spec: + + ## Use only alphanumeric characters for sid up to a maximum of 8 characters + sid: ORCL1 + + ## DB edition. N/A for createAs clone or standby + ## Valid values for edition are enterprise, standard, express or free + edition: enterprise + + ## Type of database. 
+ ## Valid values for createAs are primary, clone, standby or truecache + createAs: primary + + ## Specify true to convert this standby to a snapshot standby + ## Valid only if createAs is standby + convertToSnapshotStandby: false + + ## Reference to a source primary database. + ## Valid only when createAs is clone, standby or truecache + ## The name of a source primary database resource from the same namespace + primaryDatabaseRef: "" + + ## Only valid when createAs is set to truecache + ## Accepts a semi colon separated map of `PRIMARY_PDB_SERIVCE_NAME:PRIMARY_SERVICE_NAME:TRUECACHE_SERVICE_NAME` + trueCacheServices: + # - "FREEPDB1:sales1:sales1_tc" + # - "FREEPDB1:sales2:sales2_tc" + # - "FREEPDB1:sales3:sales3_tc" + + ## Secret containing SIDB password mapped to secretKey. secretKey defaults to oracle_pwd + ## Should refer to adminPassword of Source DB if createAs is clone or standby + ## This secret will be deleted after creation of the database unless keepSecret is set to true which is the default + adminPassword: + secretName: + secretKey: + keepSecret: true + + ## DB character set. N/A for createAs clone or standby + charset: AL32UTF8 + + ## PDB name. N/A for createAs clone or standby + pdbName: orclpdb1 + + ## Enable/Disable Flashback + flashBack: false + + ## Enable/Disable ArchiveLog. Should be true to allow DB cloning + archiveLog: false + + ## Enable/Disable ForceLogging + forceLog: false + + ## Enable TCPS + enableTCPS: + + ## User specified TLS-Cert Secret + ## The following specified TLS certs will be used instead of self-signed + tcpsTlsSecret: + + ## TCPS Certificate Renewal Interval: (Valid for Self-Signed Certificates) + ## The time after which TCPS certificate will be renewed if TCPS connections are enabled. + ## tcpsCertRenewInterval can be in hours(h), minutes(m) and seconds(s); e.g. 4380h, 8760h etc. 
+ ## Maximum value is 8760h (1 year), Minimum value is 24h; Default value is 8760h (1 year) + ## If this field is commented out/removed from the yaml, it will disable the auto-renewal feature for TCPS certificate + tcpsCertRenewInterval: 8760h + + ## N/A for createAs clone or standby + ## Specify Non-Zero value to use + ## sgaTarget and pgaAggregateTarget must be in MB + ## You cannot change these initParams for Oracle Database Express (XE) and Oracle Database Free edition + initParams: + cpuCount: 0 + processes: 0 + sgaTarget: 0 + pgaAggregateTarget: 0 + + ## Database image details + ## Base DB images are available at container-registry.oracle.com or build from https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance + ## Build patched DB images from https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/patching + ## Prebuilt DB support (https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/prebuiltdb) + ## Specify prebuiltDB as true if the image includes a prebuilt DB + ## If cloning specify an image that is of same major version as the source DB at same or different patch levels + image: + pullFrom: + pullSecrets: + prebuiltDB: false + + + ## Database storage details + ## size is the required minimum size of the persistent volume + ## storageClass is specified for dynamic volume provisioning and datafilesVolumeName for static provisioning + persistence: + ## if the storageClass supports volume expansion, patch the size attribute to expand the volume + ## Shrinking volumes is not allowed + size: 100Gi + ## set ownership/permissions for writing to datafiles volume. This is usually needed for NFS volumes. + setWritePermissions: true + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. 
Update as appropriate for other cloud services + storageClass: "oci-bv" + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + accessMode: "ReadWriteOnce" + ## datafilesVolumeName is optional. Specify for binding to a specific PV and set storageClass to an empty string to disable automatic volume provisioning + datafilesVolumeName: "" + ## Optionally specify a volume containing scripts in 'setup' and 'startup' folders to be executed during database setup and startup respectively. + scriptsVolumeName: "" + + ## Database pod resource details + ## cpu can be expressed in terms of cpu units and can be a plain integer or fractional value + ## memory is measured in bytes and can be expressed in plain integer or as a fixed-point number + ## using one of these quantity suffixes: E, P, T, G, M, k. + ## You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. + resources: + ## requests denotes minimum node resources required/to be utilized by the database pod + requests: + cpu: + memory: + ## limits specifies the maximum node resources that can be utilized by the database pod + limits: + cpu: + memory: + + ## Type of service . Applicable on cloud enviroments only + ## if loadBalService : false, service type = "NodePort" else "LoadBalancer" + loadBalancer: false + + ## 'listenerPort' and 'tcpsListenerPort' fields customizes port cofigurations for normal and tcps database listeners + ## 'tcpsListenerPort' will come in effect only when 'enableTCPS' field is set + ## If loadBalancer is enabled, the listenerPort, tcpsListenerPort will be the load balancer ports + ## If loadBalancer is disabled, the listenerPort, tcpsListenerPort will be the node ports(should be in range 30000-32767) + ## If enableTCPS is set, and listenerPort is commented/not mentioned in the YAML file, only TCPS endpoint will be exposed + #listenerPort: 30001 + #tcpsListenerPort: 30002 + + ## Service Annotations (Cloud provider specific), for configuring the service (e.g. 
private LoadBalancer service) + #serviceAnnotations: + # service.beta.kubernetes.io/oci-load-balancer-internal: "true" + + ## Deploy only on nodes having required labels. Format label_name: label_value + ## For instance if the pods need to be restricted to a particular AD + ## Leave commented if there is no such requirement. + # nodeSelector: + # topology.kubernetes.io/zone: PHX-AD-1 + + ## If deploying on OpenShift, change service account name to 'sidb-sa' after you run `$ oc apply -f openshift_rbac.yaml` + serviceAccountName: default + + ## Count of Database Pods. Only one pod will have the DB mounted and open. + ## The other replica pods will have instance up and will mount and open the DB if the primary pod dies + ## For "ReadWriteOnce" AccessMode, all the replicas will schedule on the same node that has the storage attached + ## For minimal downtime during patching set the count of replicas > 1 + ## Express edition can only have one replica and does not support patching + replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_clone.yaml b/config/samples/sidb/singleinstancedatabase_clone.yaml new file mode 100644 index 00000000..438d4ea5 --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_clone.yaml @@ -0,0 +1,46 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: sidb-sample-clone + namespace: default +spec: + + ## Use only alphanumeric characters for sid + sid: ORCL2 + + ## The name of a source primary database resource to clone from the same namespace + ## Make sure the source database has been created by applying singeinstancedatabase_create.yaml + primaryDatabaseRef: sidb-sample + + ## Intended type of database. 
+ createAs: clone + + ## Should refer to SourceDB secret + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: db-admin-secret + + ## Database image details + ## This image should be the same as the source DB image being cloned + ## or a patched DB image built from the souce DB image following instructions at + ## https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/patching + image: + pullFrom: container-registry.oracle.com/database/enterprise:latest + pullSecrets: oracle-container-registry-secret + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 100Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. + replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_create.yaml b/config/samples/sidb/singleinstancedatabase_create.yaml new file mode 100644 index 00000000..2a4e4bae --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_create.yaml @@ -0,0 +1,49 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + # Creates base sidb-sample. Use singleinstancedatabase_clone.yaml for cloning + # and singleinstancedatabase_patch.yaml for patching + name: sidb-sample + namespace: default +spec: + + ## Use only alphanumeric characters for sid + sid: ORCL1 + + ## DB edition. 
+ edition: enterprise + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: db-admin-secret + + ## DB character set + charset: AL32UTF8 + + ## PDB name + pdbName: orclpdb1 + + ## Enable/Disable ArchiveLog. Should be true to allow DB cloning + archiveLog: true + + ## Database image details + image: + pullFrom: container-registry.oracle.com/database/enterprise_ru:19 + pullSecrets: oracle-container-registry-secret + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 100Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. + replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_express.yaml b/config/samples/sidb/singleinstancedatabase_express.yaml new file mode 100644 index 00000000..2cabbdaf --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_express.yaml @@ -0,0 +1,38 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: xedb-sample + namespace: default +spec: + + ## Use only alphanumeric characters for sid + sid: XE + + ## DB edition + edition: express + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: xedb-admin-secret + + ## Database image details + image: + pullFrom: container-registry.oracle.com/database/express:latest + prebuiltDB: true + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 50Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. Should be 1 for express edition. + replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_free-lite.yaml b/config/samples/sidb/singleinstancedatabase_free-lite.yaml new file mode 100644 index 00000000..93b3c4c9 --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_free-lite.yaml @@ -0,0 +1,35 @@ +# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: freedb-lite-sample + namespace: default +spec: + + ## DB edition + edition: free + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: freedb-admin-secret + + ## Database image details + image: + ## Oracle Database Free Lite is only supported from DB version 23.2 onwards + pullFrom: container-registry.oracle.com/database/free:latest-lite + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 50Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. Should be 1 for free edition. + replicas: 1 \ No newline at end of file diff --git a/config/samples/sidb/singleinstancedatabase_free-truecache.yaml b/config/samples/sidb/singleinstancedatabase_free-truecache.yaml new file mode 100644 index 00000000..c2481f7c --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_free-truecache.yaml @@ -0,0 +1,48 @@ +# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: truecache-sample + namespace: default +spec: + + ## DB edition + edition: free + + ## DB Type + createAs: truecache + + ## Reference to the source primary database. 
+ primaryDatabaseRef: "freedb-sample" + + ## Accepts a semi colon separated list of `PRIMARY_PDB_SERIVCE_NAME:PRIMARY_SERVICE_NAME:TRUECACHE_SERVICE_NAME` + trueCacheServices: + # - "FREEPDB1:sales1:sales1_tc" + # - "FREEPDB1:sales2:sales2_tc" + # - "FREEPDB1:sales3:sales3_tc" + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: freedb-admin-secret + + ## Database image details + image: + ## Oracle True Cache is only supported with 23ai + pullFrom: container-registry.oracle.com/database/free:latest + + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 50Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. Should be 1 for free edition. + replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_free.yaml b/config/samples/sidb/singleinstancedatabase_free.yaml new file mode 100644 index 00000000..6238e52e --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_free.yaml @@ -0,0 +1,36 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: freedb-sample + namespace: default +spec: + + ## DB edition + edition: free + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: freedb-admin-secret + + ## Database image details + image: + ## Oracle Database Free is only supported from DB version 23.2 onwards + pullFrom: container-registry.oracle.com/database/free:latest + prebuiltDB: true + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 50Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. Should be 1 for free edition. + replicas: 1 \ No newline at end of file diff --git a/config/samples/sidb/singleinstancedatabase_patch.yaml b/config/samples/sidb/singleinstancedatabase_patch.yaml new file mode 100644 index 00000000..455bdc79 --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_patch.yaml @@ -0,0 +1,51 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + # sidb-sample should have already been created using singleinstancedatabase_create.yaml + name: sidb-sample + namespace: default +spec: + + ## Use only alphanumeric characters for sid + sid: ORCL1 + + ## DB edition. + edition: enterprise + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: db-admin-secret + + ## DB character set + charset: AL32UTF8 + + ## PDB name + pdbName: orclpdb1 + + ## Enable/Disable ArchiveLog. 
Should be true to allow DB cloning + archiveLog: true + + ## Patched Database image + ## Using the source base image container-registry.oracle.com/database/enterprise:latest + ## build patched DB images following instructions at + ## https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/patching + image: + pullFrom: + pullSecrets: + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 100Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. + replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml b/config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml new file mode 100644 index 00000000..4eec988a --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml @@ -0,0 +1,33 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: prebuiltdb-sample + namespace: default +spec: + + ## DB edition + edition: free + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: prebuiltdb-admin-secret + + ## Database Image + image: + pullFrom: container-registry.oracle.com/database/free:latest + prebuiltDB: true + + ## Persistence is optional for prebuilt DB image + ## if specified, the prebuilt DB datafiles are copied over to the persistant volume before DB startup + #persistence: + # size: 50Gi + # storageClass: "oci-bv" + # accessMode: "ReadWriteOnce" + + ## Count of Database Pods. 
+ replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_secrets.yaml b/config/samples/sidb/singleinstancedatabase_secrets.yaml new file mode 100644 index 00000000..d98432ee --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_secrets.yaml @@ -0,0 +1,54 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +## Database Admin Password Secret +apiVersion: v1 +kind: Secret +metadata: + name: db-admin-secret + namespace: default +type: Opaque +stringData: + ## Specify your DB password here + oracle_pwd: + +--- + +## Prebuilt-Database Admin password secret +apiVersion: v1 +kind: Secret +metadata: + name: prebuiltdb-admin-secret + namespace: default +type: Opaque +stringData: + ## Specify your DB password here + oracle_pwd: + +--- + +## Oracle Database XE Admin password secret +apiVersion: v1 +kind: Secret +metadata: + name: xedb-admin-secret + namespace: default +type: Opaque +stringData: + ## Specify your DB password here + oracle_pwd: + +--- + +## Oracle Database Free Admin password secret +apiVersion: v1 +kind: Secret +metadata: + name: freedb-admin-secret + namespace: default +type: Opaque +stringData: + ## Specify your DB password here + oracle_pwd: diff --git a/config/samples/sidb/singleinstancedatabase_standby.yaml b/config/samples/sidb/singleinstancedatabase_standby.yaml new file mode 100644 index 00000000..d7ad4b23 --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_standby.yaml @@ -0,0 +1,47 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + # Creates base standbydatabase-sample. 
Use singleinstancedatabase_clone.yaml for cloning
+ # and singleinstancedatabase_patch.yaml for patching
+ name: standbydatabase-sample
+ namespace: default
+spec:
+
+ ## Use only alphanumeric characters for sid
+ sid: ORCLS
+
+ ## The name of a source primary database resource from the same namespace
+ primaryDatabaseRef: "sidb-sample"
+
+ ## Intended type of database.
+ createAs: standby
+
+ ## Secret containing SIDB password mapped to secretKey
+ adminPassword:
+ secretName: db-admin-secret
+
+ ## Database image details
+ image:
+ pullFrom: container-registry.oracle.com/database/enterprise_ru:19
+ pullSecrets: oracle-container-registry-secret
+
+ ## size is the required minimum size of the persistent volume
+ ## storageClass is specified for automatic volume provisioning
+ ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany
+ persistence:
+ size: 100Gi
+ ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers
+ storageClass: "oci-bv"
+ accessMode: "ReadWriteOnce"
+
+ ## Type of service. Applicable on cloud environments only
+ ## if loadBalService : false, service type = "NodePort" else "LoadBalancer"
+ loadBalancer: false
+
+ replicas: 1
+ \ No newline at end of file diff --git a/config/samples/sidb/singleinstancedatabase_tcps.yaml b/config/samples/sidb/singleinstancedatabase_tcps.yaml new file mode 100644 index 00000000..d3e3100b --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_tcps.yaml @@ -0,0 +1,63 @@ +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + # Creates base sidb-sample. 
Use singleinstancedatabase_clone.yaml for cloning + # and singleinstancedatabase_patch.yaml for patching + name: sidb-sample + namespace: default +spec: + + ## Use only alphanumeric characters for sid + sid: ORCL1 + + ## DB edition. + edition: enterprise + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: db-admin-secret + + ## DB character set + charset: AL32UTF8 + + ## PDB name + pdbName: orclpdb1 + + ## Enable/Disable ArchiveLog. Should be true to allow DB cloning + archiveLog: true + + ## Enable TCPS + enableTCPS: true + + ## User specified TLS-Cert Secret + ## The following specified TLS certs will be used instead of self-signed + tcpsTlsSecret: my-tls-secret + + ## TCPS Certificate Renewal Interval: (Valid for Self-Signed Certificates) + ## The time after which TCPS certificate will be renewed if TCPS connections are enabled. + ## tcpsCertRenewInterval can be in hours(h), minutes(m) and seconds(s); e.g. 4380h, 8760h etc. + ## Maximum value is 8760h (1 year), Minimum value is 24h; Default value is 8760h (1 year) + ## If this field is commented out/removed from the yaml, it will disable the auto-renewal feature for TCPS certificate + tcpsCertRenewInterval: 8760h + + ## Database image details + image: + pullFrom: container-registry.oracle.com/database/enterprise:latest + pullSecrets: oracle-container-registry-secret + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 100Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. 
+ replicas: 1 diff --git a/config/samples/singleinstancedatabase.yaml b/config/samples/singleinstancedatabase.yaml deleted file mode 100644 index ac140361..00000000 --- a/config/samples/singleinstancedatabase.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: SingleInstanceDatabase -metadata: - name: singleinstancedatabase-sample - namespace: default -spec: - - ## Use only alphanumeric characters for sid - sid: ORCL1 - - ## A source database ref to clone from, leave empty to create a fresh database - cloneFrom: "" - - ## NA if cloning from a SourceDB (cloneFrom is set) - edition: enterprise - - ## Should refer to SourceDB secret if cloning from a SourceDB (cloneFrom is set) - ## Secret containing SIDB password mapped to secretKey - ## This secret will be deleted after creation of the database unless keepSecret is set to true - adminPassword: - secretName: - secretKey: - keepSecret: false - - ## NA if cloning from a SourceDB (cloneFrom is set) - charset: AL32UTF8 - - ## NA if cloning from a SourceDB (cloneFrom is set) - pdbName: orclpdb1 - - ## Enable/Disable Flashback - flashBack: false - - ## Enable/Disable ArchiveLog - archiveLog: false - - ## Enable/Disable ForceLogging - forceLog: false - - ## NA if cloning from a SourceDB (cloneFrom is set) - ## Specify both sgaSize and pgaSize (in MB) or dont specify both - ## Specify Non-Zero value to use - initParams: - cpuCount: 0 - processes: 0 - sgaTarget: 0 - pgaAggregateTarget: 0 - - ## Database image details - ## Database can be patched by updating the RU version/image - ## Major version changes are not supported - image: - pullFrom: - pullSecrets: - - ## size : Minimum size of pvc | class : PVC storage Class - ## AccessMode can only accept one of ReadWriteOnce, ReadWriteMany - persistence: - size: 100Gi - storageClass: "" 
- accessMode: "ReadWriteMany" - - ## Type of service . Applicable on cloud enviroments only - ## if loadBalService : false, service type = "NodePort". else "LoadBalancer" - loadBalancer: false - - ## Deploy only on nodes having required labels. Format label_name : label_value - ## Leave empty if there is no such requirement. - ## Uncomment to use - # nodeSelector: - # failure-domain.beta.kubernetes.io/zone: bVCG:PHX-AD-1 - # pool: sidb - - ## Count of Database Pods. Applicable only for "ReadWriteMany" AccessMode - replicas: 1 diff --git a/config/samples/singleinstancedatabase_clone.yaml b/config/samples/singleinstancedatabase_clone.yaml deleted file mode 100644 index c324c1c3..00000000 --- a/config/samples/singleinstancedatabase_clone.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: SingleInstanceDatabase -metadata: - name: singleinstancedatabase-sample - namespace: default -spec: - - ## Use only alphanumeric characters for sid - sid: ORCL1 - - ## A source database ref to clone from, leave empty to create a fresh database - cloneFrom: "" - - ## Should refer to SourceDB secret - ## Secret containing SIDB password mapped to secretKey - ## This secret will be deleted after creation of the database unless keepSecret is set to true - adminPassword: - secretName: - secretKey: - keepSecret: false - - ## Database image details - ## Database can be patched out of place by updating the RU version/image - ## Major version changes are not supported - image: - pullFrom: - pullSecrets: - - ## size : Minimum size of pvc | class : PVC storage Class - ## AccessMode can only accept one of ReadWriteOnce, ReadWriteMany - persistence: - size: 100Gi - storageClass: "" - accessMode: "ReadWriteMany" - - ## Count of Database Pods. 
Applicable only for "ReadWriteMany" AccessMode - replicas: 1 diff --git a/config/samples/singleinstancedatabase_patch.yaml b/config/samples/singleinstancedatabase_patch.yaml deleted file mode 100644 index 4d2d03b0..00000000 --- a/config/samples/singleinstancedatabase_patch.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: SingleInstanceDatabase -metadata: - name: singleinstancedatabase-sample - namespace: default -spec: - - ## Use only alphanumeric characters for sid - sid: ORCL1 - - ## Secret containing SIDB password mapped to secretKey - ## This secret will be deleted after creation of the database unless keepSecret is set to true - adminPassword: - secretName: - secretKey: - keepSecret: false - - ## Patch the database by updating the RU version/image - ## Major version changes are not supported - image: - pullFrom: - pullSecrets: - - ## size : Minimum size of pvc | class : PVC storage Class - ## AccessMode can only accept one of ReadWriteOnce, ReadWriteMany - persistence: - size: 100Gi - storageClass: "" - accessMode: "ReadWriteMany" - - ## Count of Database Pods. Applicable only for "ReadWriteMany" AccessMode - replicas: 1 diff --git a/config/samples/singleinstancedatabase_prov.yaml b/config/samples/singleinstancedatabase_prov.yaml deleted file mode 100644 index 7123b117..00000000 --- a/config/samples/singleinstancedatabase_prov.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: SingleInstanceDatabase -metadata: - name: singleinstancedatabase-sample - namespace: default -spec: - - ## Use only alphanumeric characters for sid - sid: ORCL1 - - ## Secret containing SIDB password mapped to secretKey - adminPassword: - secret: - secretName: - key: - - ## Database image details - image: - version: - pullFrom: - pullSecrets: - - ## size : Minimum size of pvc | class : PVC storage Class . - ## AccessMode can only accept one of ReadWriteOnce , ReadWriteMany - persistence: - size: 100Gi - storageClass: "" - accessMode: "ReadWriteMany" - - ## Count of Database Pods. Applicable only for "ReadWriteMany" AccessMode - replicas: 1 diff --git a/config/scorecard/bases/config.yaml b/config/scorecard/bases/config.yaml index 0650fef2..fbd8c506 100644 --- a/config/scorecard/bases/config.yaml +++ b/config/scorecard/bases/config.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # kind: Configuration diff --git a/config/scorecard/kustomization.yaml b/config/scorecard/kustomization.yaml index d9c19e9b..bf4c1e7c 100644 --- a/config/scorecard/kustomization.yaml +++ b/config/scorecard/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # resources: diff --git a/config/scorecard/patches/basic.config.yaml b/config/scorecard/patches/basic.config.yaml index 67fa78c9..516ab755 100644 --- a/config/scorecard/patches/basic.config.yaml +++ b/config/scorecard/patches/basic.config.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # - op: add diff --git a/config/scorecard/patches/olm.config.yaml b/config/scorecard/patches/olm.config.yaml index 521bc8e6..40e4fbe8 100644 --- a/config/scorecard/patches/olm.config.yaml +++ b/config/scorecard/patches/olm.config.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # - op: add diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml index ef3ca64c..f78631f3 100644 --- a/config/webhook/kustomization.yaml +++ b/config/webhook/kustomization.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # resources: diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml index afc69aba..972d71bb 100644 --- a/config/webhook/kustomizeconfig.yaml +++ b/config/webhook/kustomizeconfig.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 5cb37b30..b186a5b0 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -1,21 +1,558 @@ - --- apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-autonomousdatabasebackup + failurePolicy: Fail + name: mautonomousdatabasebackupv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: mcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-dbcssystem + failurePolicy: Fail + name: mdbcssystemv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - dbcssystems + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-lrest + failurePolicy: Fail + name: mlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-lrpdb + 
failurePolicy: Fail + name: mlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrpdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: mpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-shardingdatabase + failurePolicy: Fail + name: mshardingdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup + failurePolicy: Fail + name: mautonomousdatabasebackupv1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: mcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v1alpha1-dataguardbroker + failurePolicy: Fail + name: mdataguardbroker.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 
+ operations: + - CREATE + - UPDATE + resources: + - dataguardbrokers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-dbcssystem + failurePolicy: Fail + name: mdbcssystemv1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - dbcssystems + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v1alpha1-oraclerestdataservice + failurePolicy: Fail + name: moraclerestdataservice.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - oraclerestdataservices + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: mpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: mshardingdatabasev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v1alpha1-singleinstancedatabase + failurePolicy: Fail + name: msingleinstancedatabase.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - 
singleinstancedatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-observability-oracle-com-v1-databaseobserver + failurePolicy: Fail + name: mdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-observability-oracle-com-v1alpha1-databaseobserver + failurePolicy: Fail + name: mdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-observability-oracle-com-v4-databaseobserver + failurePolicy: Fail + name: mdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-autonomouscontainerdatabase + failurePolicy: Fail + name: vautonomouscontainerdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomouscontainerdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-autonomousdatabasebackup + failurePolicy: Fail + name: 
vautonomousdatabasebackupv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-autonomousdatabaserestore + failurePolicy: Fail + name: vautonomousdatabaserestorev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabaserestores + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: vcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-lrest + failurePolicy: Fail + name: vlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /mutate-database-oracle-com-v1alpha1-singleinstancedatabase + path: /validate-database-oracle-com-v4-lrpdb failurePolicy: Fail - name: msingleinstancedatabase.kb.io + name: vlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrpdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: vpdb.kb.io + rules: + - 
apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-shardingdatabase + failurePolicy: Fail + name: vshardingdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase + failurePolicy: Fail + name: vautonomouscontainerdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -25,16 +562,173 @@ webhooks: - CREATE - UPDATE resources: - - singleinstancedatabases + - autonomouscontainerdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-autonomousdatabasebackup + failurePolicy: Fail + name: vautonomousdatabasebackupv1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-autonomousdatabaserestore + failurePolicy: Fail + name: vautonomousdatabaserestorev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabaserestores + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-autonomousdatabase + 
failurePolicy: Fail + name: vautonomousdatabasev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: vcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-dataguardbroker + failurePolicy: Fail + name: vdataguardbroker.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - dataguardbrokers + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-oraclerestdataservice + failurePolicy: Fail + name: voraclerestdataservice.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - oraclerestdataservices + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: vpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: vshardingdatabasev1alpha1.kb.io + rules: + - 
apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases sideEffects: None - ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: validating-webhook-configuration -webhooks: - admissionReviewVersions: - v1 - v1beta1 @@ -57,3 +751,63 @@ webhooks: resources: - singleinstancedatabases sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-observability-oracle-com-v1-databaseobserver + failurePolicy: Fail + name: vdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-observability-oracle-com-v1alpha1-databaseobserver + failurePolicy: Fail + name: vdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-observability-oracle-com-v4-databaseobserver + failurePolicy: Fail + name: vdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml index 08333c36..f93ad4d2 100644 --- a/config/webhook/service.yaml +++ b/config/webhook/service.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # apiVersion: v1 diff --git a/controllers/database/autonomouscontainerdatabase_controller.go b/controllers/database/autonomouscontainerdatabase_controller.go new file mode 100644 index 00000000..73830eee --- /dev/null +++ b/controllers/database/autonomouscontainerdatabase_controller.go @@ -0,0 +1,658 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "context" + "encoding/json" + "errors" + "reflect" + + "github.com/go-logr/logr" + "github.com/oracle/oci-go-sdk/v65/database" + + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/oracle/oracle-database-operator/commons/annotations" + "github.com/oracle/oracle-database-operator/commons/k8s" + "github.com/oracle/oracle-database-operator/commons/oci" +) + +// AutonomousContainerDatabaseReconciler reconciles a AutonomousContainerDatabase object +type AutonomousContainerDatabaseReconciler struct { + KubeClient client.Client + Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + + dbService oci.DatabaseService +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AutonomousContainerDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbv4.AutonomousContainerDatabase{}). + WithEventFilter(r.eventFilterPredicate()). 
+ WithOptions(controller.Options{MaxConcurrentReconciles: 5}). + Complete(r) +} + +func (r *AutonomousContainerDatabaseReconciler) eventFilterPredicate() predicate.Predicate { + pred := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + desiredACD, acdOk := e.ObjectNew.(*dbv4.AutonomousContainerDatabase) + if acdOk { + oldACD := e.ObjectOld.(*dbv4.AutonomousContainerDatabase) + + if !reflect.DeepEqual(oldACD.Status, desiredACD.Status) || + (controllerutil.ContainsFinalizer(oldACD, dbv4.LastSuccessfulSpec) != controllerutil.ContainsFinalizer(desiredACD, dbv4.LastSuccessfulSpec)) || + (controllerutil.ContainsFinalizer(oldACD, dbv4.ACDFinalizer) != controllerutil.ContainsFinalizer(desiredACD, dbv4.ACDFinalizer)) { + // Don't enqueue if the status, lastSucSpec, or the finalizler changes + return false + } + + return true + } + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Do not trigger reconciliation when the object is deleted from the cluster. + _, acdOk := e.Object.(*dbv4.AutonomousContainerDatabase) + return !acdOk + }, + } + + return pred +} + +//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomouscontainerdatabases,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomouscontainerdatabases/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabases,verbs=get;list;watch;create;update;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile +func (r *AutonomousContainerDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Log.WithValues("Namespace/Name", req.NamespacedName) + + var err error + var ociACD *dbv4.AutonomousContainerDatabase + + // Get the autonomousdatabase instance from the cluster + acd := &dbv4.AutonomousContainerDatabase{} + if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, acd); err != nil { + // Ignore not-found errors, since they can't be fixed by an immediate requeue. + // No need to change the since we don't know if we obtain the object. + if apiErrors.IsNotFound(err) { + return emptyResult, nil + } + // Failed to get ACD, so we don't need to update the status + return emptyResult, err + } + + /****************************************************************** + * Get OCI database client + ******************************************************************/ + if err := r.setupOCIClients(logger, acd); err != nil { + logger.Error(err, "Fail to setup OCI clients") + + return r.manageError(logger, acd, err) + } + + logger.Info("OCI clients configured succesfully") + + /****************************************************************** + * Get OCI AutonomousDatabase + ******************************************************************/ + + if acd.Spec.AutonomousContainerDatabaseOCID != nil { + resp, err := r.dbService.GetAutonomousContainerDatabase(*acd.Spec.AutonomousContainerDatabaseOCID) + if err != nil { + return r.manageError(logger, acd, err) + } + + ociACD = &dbv4.AutonomousContainerDatabase{} + ociACD.UpdateFromOCIACD(resp.AutonomousContainerDatabase) + } + + /****************************************************************** + * Requeue if the ACD is in an intermediate state + * No-op if the ACD OCID is nil + * To get the latest status, execute before all the reconcile logic 
+ ******************************************************************/ + needsRequeue, err := r.validateLifecycleState(logger, acd, ociACD) + if err != nil { + return r.manageError(logger, acd, err) + } + + if needsRequeue { + return requeueResult, nil + } + + /****************************************************************** + * Cleanup the resource if the resource is to be deleted. + * Deletion timestamp will be added to a object before it is deleted. + * Kubernetes server calls the clean up function if a finalizer exitsts, and won't delete the real object until + * all the finalizers are removed from the object metadata. + * Refer to this page for more details of using finalizers: https://kubernetes.io/blog/2022/05/14/using-finalizers-to-control-deletion/ + ******************************************************************/ + exitReconcile, err := r.validateCleanup(logger, acd) + if err != nil { + return r.manageError(logger, acd, err) + } + + if exitReconcile { + return emptyResult, nil + } + + /****************************************************************** + * Register/unregister the finalizer + ******************************************************************/ + if err := r.validateFinalizer(acd); err != nil { + return r.manageError(logger, acd, err) + } + + /****************************************************************** + * Validate operations + ******************************************************************/ + exitReconcile, result, err := r.validateOperation(logger, acd, ociACD) + if err != nil { + return r.manageError(logger, acd, err) + } + if exitReconcile { + return result, nil + } + + /****************************************************************** + * Update the status and requeue if it's in an intermediate state + ******************************************************************/ + if err := r.KubeClient.Status().Update(context.TODO(), acd); err != nil { + return r.manageError(logger, acd, err) + } + + if 
dbv4.IsACDIntermediateState(acd.Status.LifecycleState) { + logger.WithName("IsIntermediateState").Info("Current lifecycleState is " + string(acd.Status.LifecycleState) + "; reconcile queued") + return requeueResult, nil + } + + if err := r.patchLastSuccessfulSpec(acd); err != nil { + return r.manageError(logger, acd, err) + } + + logger.Info("AutonomousContainerDatabase reconciles successfully") + + return emptyResult, nil +} + +func (r *AutonomousContainerDatabaseReconciler) setupOCIClients(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) error { + var err error + + authData := oci.ApiKeyAuth{ + ConfigMapName: acd.Spec.OCIConfig.ConfigMapName, + SecretName: acd.Spec.OCIConfig.SecretName, + Namespace: acd.GetNamespace(), + } + + provider, err := oci.GetOciProvider(r.KubeClient, authData) + if err != nil { + return err + } + + r.dbService, err = oci.NewDatabaseService(logger, r.KubeClient, provider) + if err != nil { + return err + } + + return nil +} + +func (r *AutonomousContainerDatabaseReconciler) manageError(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase, issue error) (ctrl.Result, error) { + l := logger.WithName("manageError") + + // Has synced at least once + if acd.Status.LifecycleState != "" { + // Send event + r.Recorder.Event(acd, corev1.EventTypeWarning, "UpdateFailed", issue.Error()) + + var finalIssue = issue + + // Roll back + specChanged, err := r.getACD(logger, acd) + if err != nil { + finalIssue = k8s.CombineErrors(finalIssue, err) + } + + // We don't exit the Reconcile if the spec has changed + // becasue it will exit anyway after the manageError is called. 
+ if specChanged { + if err := r.KubeClient.Update(context.TODO(), acd); err != nil { + finalIssue = k8s.CombineErrors(finalIssue, err) + } + } + + l.Error(finalIssue, "UpdateFailed") + + return emptyResult, nil + } else { + // Send event + r.Recorder.Event(acd, corev1.EventTypeWarning, "CreateFailed", issue.Error()) + + return emptyResult, issue + } +} + +// validateLifecycleState gets and validates the current lifecycleState +func (r *AutonomousContainerDatabaseReconciler) validateLifecycleState(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase, ociACD *dbv4.AutonomousContainerDatabase) (needsRequeue bool, err error) { + if ociACD == nil { + return false, nil + } + + l := logger.WithName("validateLifecycleState") + + // Special case: Once the status changes to AVAILABLE after the provision operation, the reconcile stops. + // The backup starts right after the provision operation and the controller is not able to track the operation in this case. + // To prevent this issue, requeue the reconcile if the previous status is PROVISIONING and we ignore the status change + // until it becomes BACKUP_IN_PROGRESS. 
+ if acd.Status.LifecycleState == database.AutonomousContainerDatabaseLifecycleStateProvisioning && + ociACD.Status.LifecycleState != database.AutonomousContainerDatabaseLifecycleStateBackupInProgress { + l.Info("Provisioning the ACD and waiting for the backup to start; reconcile queued") + return true, nil + } + + acd.Status = ociACD.Status + + if err := r.KubeClient.Status().Update(context.TODO(), acd); err != nil { + return false, err + } + + if dbv4.IsACDIntermediateState(ociACD.Status.LifecycleState) { + l.Info("LifecycleState is " + string(acd.Status.LifecycleState) + "; reconcile queued") + return true, nil + } + + return false, nil +} + +func (r *AutonomousContainerDatabaseReconciler) validateCleanup(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) (exitReconcile bool, err error) { + l := logger.WithName("validateCleanup") + + isACDToBeDeleted := acd.GetDeletionTimestamp() != nil + + if !isACDToBeDeleted { + return false, nil + } + + if controllerutil.ContainsFinalizer(acd, dbv4.ACDFinalizer) { + if acd.Status.LifecycleState == database.AutonomousContainerDatabaseLifecycleStateTerminating { + l.Info("Resource is already in TERMINATING state") + // Delete in progress, continue with the reconcile logic + return false, nil + } + + if acd.Status.LifecycleState == database.AutonomousContainerDatabaseLifecycleStateTerminated { + // The acd has been deleted. Remove the finalizer and exit the reconcile. + // Once all finalizers have been removed, the object will be deleted. + l.Info("Resource is already in TERMINATED state; remove the finalizer") + if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { + return false, err + } + return true, nil + } + + if acd.Spec.AutonomousContainerDatabaseOCID == nil { + l.Info("Missing AutonomousContainerDatabaseOCID to terminate Autonomous Container Database; remove the finalizer anyway", "Name", acd.Name, "Namespace", acd.Namespace) + // Remove finalizer anyway. 
+ if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { + return false, err + } + return true, nil + } + + if acd.Spec.Action != dbv4.AcdActionTerminate { + // Run finalization logic for finalizer. If the finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. + l.Info("Terminating Autonomous Container Database") + acd.Spec.Action = dbv4.AcdActionTerminate + if err := r.KubeClient.Update(context.TODO(), acd); err != nil { + return false, err + } + // Exit the reconcile since we have updated the spec + return true, nil + } + + // Continue with the reconcile logic + return false, nil + } + + // Exit the Reconcile since the to-be-deleted resource doesn't has a finalizer + return true, nil +} + +func (r *AutonomousContainerDatabaseReconciler) validateFinalizer(acd *dbv4.AutonomousContainerDatabase) error { + // Delete is not schduled. Update the finalizer for this CR if hardLink is present + if acd.Spec.HardLink != nil { + if *acd.Spec.HardLink && !controllerutil.ContainsFinalizer(acd, dbv4.ACDFinalizer) { + if err := k8s.AddFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { + return err + } + } else if !*acd.Spec.HardLink && controllerutil.ContainsFinalizer(acd, dbv4.ACDFinalizer) { + if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { + return err + } + } + } + + return nil +} + +func (r *AutonomousContainerDatabaseReconciler) validateOperation( + logger logr.Logger, + acd *dbv4.AutonomousContainerDatabase, + ociACD *dbv4.AutonomousContainerDatabase) (exitReconcile bool, result ctrl.Result, err error) { + + l := logger.WithName("validateOperation") + + lastSpec, err := acd.GetLastSuccessfulSpec() + if err != nil { + return false, emptyResult, err + } + + // If lastSucSpec is nil, then it's CREATE or BIND opertaion + if lastSpec == nil { + if acd.Spec.AutonomousContainerDatabaseOCID == nil { + l.Info("Create operation") + 
+ err := r.createACD(logger, acd) + if err != nil { + return false, emptyResult, err + } + + // Update the ACD OCID + if err := r.updateCR(acd); err != nil { + return false, emptyResult, err + } + + l.Info("AutonomousContainerDatabaseOCID updated; exit reconcile") + return true, emptyResult, nil + } else { + l.Info("Bind operation") + + _, err := r.getACD(logger, acd) + if err != nil { + return false, emptyResult, err + } + + if err := r.updateCR(acd); err != nil { + return false, emptyResult, err + } + + l.Info("spec updated; exit reconcile") + return false, emptyResult, nil + } + } + + // If it's not CREATE or BIND opertaion, then UPDATE or SYNC + // Compare with the lastSucSpec.details. If the details are different, it means that the user updates the spec. + lastDifACD := acd.DeepCopy() + + lastDetailsChanged, err := lastDifACD.RemoveUnchangedSpec(*lastSpec) + if err != nil { + return false, emptyResult, err + } + + if lastDetailsChanged { + l.Info("Update operation") + + // Double check if the user input spec is actually different from the spec in OCI. If so, then update the resource. + + difACD := acd.DeepCopy() + + ociDetailsChanged, err := difACD.RemoveUnchangedSpec(ociACD.Spec) + if err != nil { + return false, emptyResult, err + } + + if ociDetailsChanged { + ociReqSent, specChanged, err := r.updateACD(logger, acd, difACD) + if err != nil { + return false, emptyResult, err + } + + // Requeue the k8s request if an OCI request is sent, since OCI can only process one request at a time. + if ociReqSent { + if specChanged { + if err := r.KubeClient.Update(context.TODO(), acd); err != nil { + return false, emptyResult, err + } + + l.Info("spec updated; exit reconcile") + return false, emptyResult, nil + + } else { + l.Info("reconcile queued") + return true, requeueResult, nil + } + } + } + + // Stop the update and patch the lastSpec when the current ACD matches the oci ACD. 
+ if err := r.patchLastSuccessfulSpec(acd); err != nil { + return false, emptyResult, err + } + + return false, emptyResult, nil + + } else { + l.Info("No operation specified; sync the resource") + + // The user doesn't change the spec and the controller should pull the spec from the OCI. + specChanged, err := r.getACD(logger, acd) + if err != nil { + return false, emptyResult, err + } + + if specChanged { + l.Info("The local spec doesn't match the oci's spec; update the CR") + if err := r.updateCR(acd); err != nil { + return false, emptyResult, err + } + + return true, emptyResult, nil + } + return false, emptyResult, nil + } +} + +func (r *AutonomousContainerDatabaseReconciler) updateCR(acd *dbv4.AutonomousContainerDatabase) error { + // Update the lastSucSpec + if err := acd.UpdateLastSuccessfulSpec(); err != nil { + return err + } + + if err := r.KubeClient.Update(context.TODO(), acd); err != nil { + return err + } + return nil +} + +func (r *AutonomousContainerDatabaseReconciler) patchLastSuccessfulSpec(acd *dbv4.AutonomousContainerDatabase) error { + specBytes, err := json.Marshal(acd.Spec) + if err != nil { + return err + } + + anns := map[string]string{ + dbv4.LastSuccessfulSpec: string(specBytes), + } + + annotations.PatchAnnotations(r.KubeClient, acd, anns) + + return nil +} + +func (r *AutonomousContainerDatabaseReconciler) createACD(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) error { + logger.WithName("createACD").Info("Sending CreateAutonomousContainerDatabase request to OCI") + + resp, err := r.dbService.CreateAutonomousContainerDatabase(acd) + if err != nil { + return err + } + + acd.UpdateFromOCIACD(resp.AutonomousContainerDatabase) + + return nil +} + +func (r *AutonomousContainerDatabaseReconciler) getACD(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) (bool, error) { + if acd == nil { + return false, errors.New("AutonomousContainerDatabase OCID is missing") + } + + logger.WithName("getACD").Info("Sending 
GetAutonomousContainerDatabase request to OCI") + + // Get the information from OCI + resp, err := r.dbService.GetAutonomousContainerDatabase(*acd.Spec.AutonomousContainerDatabaseOCID) + if err != nil { + return false, err + } + + specChanged := acd.UpdateFromOCIACD(resp.AutonomousContainerDatabase) + + return specChanged, nil +} + +// updateACD returns true if an OCI request is sent. +// The AutonomousContainerDatabase is updated with the returned object from the OCI requests. +func (r *AutonomousContainerDatabaseReconciler) updateACD( + logger logr.Logger, + acd *dbv4.AutonomousContainerDatabase, + difACD *dbv4.AutonomousContainerDatabase) (ociReqSent bool, specChanged bool, err error) { + + validations := []func(logr.Logger, *dbv4.AutonomousContainerDatabase, *dbv4.AutonomousContainerDatabase) (bool, bool, error){ + r.validateGeneralFields, + r.validateDesiredLifecycleState, + } + + for _, op := range validations { + ociReqSent, specChanged, err := op(logger, acd, difACD) + if err != nil { + return false, false, err + } + + if ociReqSent { + return true, specChanged, nil + } + } + + return false, false, nil +} + +func (r *AutonomousContainerDatabaseReconciler) validateGeneralFields( + logger logr.Logger, + acd *dbv4.AutonomousContainerDatabase, + difACD *dbv4.AutonomousContainerDatabase) (sent bool, requeue bool, err error) { + + if difACD.Spec.DisplayName == nil && + difACD.Spec.PatchModel == "" && + difACD.Spec.FreeformTags == nil { + return false, false, nil + } + + logger.WithName("validateGeneralFields").Info("Sending UpdateAutonomousDatabase request to OCI") + + resp, err := r.dbService.UpdateAutonomousContainerDatabase(*acd.Spec.AutonomousContainerDatabaseOCID, difACD) + if err != nil { + return false, false, err + } + + acd.UpdateStatusFromOCIACD(resp.AutonomousContainerDatabase) + + return true, false, nil +} + +func (r *AutonomousContainerDatabaseReconciler) validateDesiredLifecycleState( + logger logr.Logger, + acd *dbv4.AutonomousContainerDatabase, + 
difACD *dbv4.AutonomousContainerDatabase) (sent bool, specChanged bool, err error) { + + if difACD.Spec.Action == dbv4.AcdActionBlank { + return false, false, nil + } + + l := logger.WithName("validateDesiredLifecycleState") + + switch difACD.Spec.Action { + case dbv4.AcdActionRestart: + l.Info("Sending RestartAutonomousContainerDatabase request to OCI") + + resp, err := r.dbService.RestartAutonomousContainerDatabase(*acd.Spec.AutonomousContainerDatabaseOCID) + if err != nil { + return false, false, err + } + + acd.Status.LifecycleState = resp.LifecycleState + case dbv4.AcdActionTerminate: + l.Info("Sending TerminateAutonomousContainerDatabase request to OCI") + + _, err := r.dbService.TerminateAutonomousContainerDatabase(*acd.Spec.AutonomousContainerDatabaseOCID) + if err != nil { + return false, false, err + } + + acd.Status.LifecycleState = database.AutonomousContainerDatabaseLifecycleStateTerminating + default: + return false, false, errors.New("unknown lifecycleState") + } + + acd.Spec.Action = dbv4.AcdActionBlank + + return true, true, nil +} diff --git a/controllers/database/autonomousdatabase_controller.go b/controllers/database/autonomousdatabase_controller.go index 2f250489..37ae1b14 100644 --- a/controllers/database/autonomousdatabase_controller.go +++ b/controllers/database/autonomousdatabase_controller.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,510 +40,795 @@ package controllers import ( "context" + "errors" "fmt" "reflect" + "regexp" + "strings" + "time" "github.com/go-logr/logr" - "github.com/oracle/oci-go-sdk/v45/database" - "github.com/oracle/oci-go-sdk/v45/secrets" - "github.com/oracle/oci-go-sdk/v45/workrequests" + "github.com/oracle/oci-go-sdk/v65/database" apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/retry" + + "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - adbutil "github.com/oracle/oracle-database-operator/commons/autonomousdatabase" - "github.com/oracle/oracle-database-operator/commons/finalizer" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/oracle/oracle-database-operator/commons/k8s" "github.com/oracle/oracle-database-operator/commons/oci" ) -// AutonomousDatabaseReconciler reconciles a AutonomousDatabase object +// name of our custom finalizer +const ADB_FINALIZER = "database.oracle.com/adb-finalizer" + +var requeueResult ctrl.Result = ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second} +var emptyResult ctrl.Result = ctrl.Result{} + +// *AutonomousDatabaseReconciler reconciles a AutonomousDatabase object type AutonomousDatabaseReconciler struct { KubeClient client.Client Log logr.Logger Scheme *runtime.Scheme + Recorder record.EventRecorder - currentLogger 
logr.Logger + dbService oci.DatabaseService } // SetupWithManager function func (r *AutonomousDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&dbv1alpha1.AutonomousDatabase{}). - WithEventFilter(r.eventFilterPredicate()). + For(&dbv4.AutonomousDatabase{}). + Watches( + &dbv4.AutonomousDatabaseRestore{}, + handler.EnqueueRequestsFromMapFunc(r.enqueueMapFn), + ). + WithEventFilter(predicate.And(r.eventFilterPredicate(), r.watchPredicate())). WithOptions(controller.Options{MaxConcurrentReconciles: 50}). // ReconcileHandler is never invoked concurrently with the same object. Complete(r) } +func (r *AutonomousDatabaseReconciler) enqueueMapFn(ctx context.Context, o client.Object) []reconcile.Request { + reqs := make([]reconcile.Request, len(o.GetOwnerReferences())) + + for _, owner := range o.GetOwnerReferences() { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: owner.Name, + Namespace: o.GetNamespace(), + }, + }) + } + + return reqs +} + +func (r *AutonomousDatabaseReconciler) watchPredicate() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + _, restoreOk := e.Object.(*dbv4.AutonomousDatabaseRestore) + // Don't enqueue if the event is from Backup or Restore + return !restoreOk + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // Enqueue the update event only when the status changes the first time + desiredRestore, restoreOk := e.ObjectNew.(*dbv4.AutonomousDatabaseRestore) + if restoreOk { + oldRestore := e.ObjectOld.(*dbv4.AutonomousDatabaseRestore) + return oldRestore.Status.Status == "" && desiredRestore.Status.Status != "" + } + + // Enqueue if the event is not from Backup or Restore + return true + }, + } +} func (r *AutonomousDatabaseReconciler) eventFilterPredicate() predicate.Predicate { - pred := predicate.Funcs{ + return predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - oldADB := 
e.ObjectOld.DeepCopyObject().(*dbv1alpha1.AutonomousDatabase) - newADB := e.ObjectNew.DeepCopyObject().(*dbv1alpha1.AutonomousDatabase) - - // Reconciliation should NOT happen if the lastSuccessfulSpec annotation or status.state changes. - oldSucSpec := oldADB.GetAnnotations()[dbv1alpha1.LastSuccessfulSpec] - newSucSpec := newADB.GetAnnotations()[dbv1alpha1.LastSuccessfulSpec] - - lastSucSpecChanged := oldSucSpec != newSucSpec - stateChanged := oldADB.Status.LifecycleState != newADB.Status.LifecycleState - if lastSucSpecChanged || stateChanged { - // Don't enqueue request - return false + // source object can be AutonomousDatabase, AutonomousDatabaseBackup, or AutonomousDatabaseRestore + desiredAdb, adbOk := e.ObjectNew.(*dbv4.AutonomousDatabase) + if adbOk { + oldAdb := e.ObjectOld.(*dbv4.AutonomousDatabase) + + specChanged := !reflect.DeepEqual(oldAdb.Spec, desiredAdb.Spec) + statusChanged := !reflect.DeepEqual(oldAdb.Status, desiredAdb.Status) + + if (!specChanged && statusChanged) || + (controllerutil.ContainsFinalizer(oldAdb, ADB_FINALIZER) != controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER)) { + // Don't enqueue in the folowing condition: + // 1. only status changes 2. ADB_FINALIZER changes + return false + } + + return true } - // Enqueue request return true }, DeleteFunc: func(e event.DeleteEvent) bool { - // Do not trigger reconciliation when the real object is deleted from the cluster. - return false + // Do not trigger reconciliation when the object is deleted from the cluster. 
			// Exclude AutonomousDatabase objects from this event filter;
			// all other kinds pass through.
			_, adbOk := e.Object.(*dbv4.AutonomousDatabase)
			return !adbOk
		},
	}
}

// +kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabases/status,verbs=update;patch
// +kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabasebackups,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabaserestores,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups=database.oracle.com,resources=autonomouscontainerdatabases,verbs=get;list
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=create;get;list;update
// +kubebuilder:rbac:groups="",resources=configmaps;secrets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch

// Reconcile drives an AutonomousDatabase resource toward its desired state:
// it syncs with the Autonomous Database in OCI, manages the finalizer,
// performs the requested spec.Action, syncs backups, validates the wallet,
// and finally persists spec/status changes back to the cluster.
func (r *AutonomousDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := r.Log.WithValues("Namespace/Name", req.NamespacedName)

	var err error
	// Indicates whether spec has been changed at the end of the reconcile.
	var specChanged bool = false

	// Get the autonomousdatabase instance from the cluster
	desiredAdb := &dbv4.AutonomousDatabase{}
	if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, desiredAdb); err != nil {
		// Ignore not-found errors, since they can't be fixed by an immediate requeue.
		if apiErrors.IsNotFound(err) {
			return emptyResult, nil
		}
		return emptyResult, err
	}

	/******************************************************************
	 * Get OCI database client
	 ******************************************************************/
	if err := r.setupOCIClients(logger, desiredAdb); err != nil {
		return r.manageError(
			logger.WithName("setupOCIClients"),
			desiredAdb,
			fmt.Errorf("Failed to get OCI Database Client: %w", err))
	}

	logger.Info("OCI clients configured succesfully")

	/******************************************************************
	 * Fill the empty fields in the local resource at the beginning of
	 * the reconciliation.
	 ******************************************************************/
	// Fill the empty fields in the AutonomousDatabase resource by
	// syncing up with the Autonomous Database in OCI. Only the fields
	// that have nil values will be overwritten.
	var stateBeforeFirstSync = desiredAdb.Status.LifecycleState
	if _, err = r.syncAutonomousDatabase(logger, desiredAdb, false); err != nil {
		return r.manageError(
			logger.WithName("syncAutonomousDatabase"),
			desiredAdb,
			fmt.Errorf("Failed to sync AutonomousDatabase: %w", err))
	}

	// If the lifecycle state changes from any other states to
	// AVAILABLE and spec.action is an empty string, it means that
	// the resource in OCI just finished the work, and the spec
	// of the Autonomous Database in OCI might also change.
	// This is because OCI won't update the spec until the work
	// completes. In this case, we need to update the spec of
	// the resource in local cluster.
	if stateBeforeFirstSync != database.AutonomousDatabaseLifecycleStateAvailable &&
		desiredAdb.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateAvailable {
		if specChanged, err = r.syncAutonomousDatabase(logger, desiredAdb, true); err != nil {
			return r.manageError(
				logger.WithName("syncAutonomousDatabase"),
				desiredAdb,
				fmt.Errorf("Failed to sync AutonomousDatabase: %w", err))
		}
	}

	/******************************************************************
	 * Determine if the external resource needs to be cleaned up.
	 * If yes, delete the Autonomous Database in OCI and exits the
	 * reconcile function immediately.
	 *
	 * There is no need to check the other fields if the resource is
	 * under deletion. This method should be executed soon after the OCI
	 * database client is obtained and the local resource is synced in
	 * the above two steps.
	 *
	 * Kubernetes server calls the clean up function if a finalizer exists,
	 * and won't delete the object until all the finalizers are removed
	 * from the object metadata.
	 ******************************************************************/
	if desiredAdb.GetDeletionTimestamp().IsZero() {
		// The Autonomous Database is not being deleted. Update the finalizer.
		if desiredAdb.Spec.HardLink != nil &&
			*desiredAdb.Spec.HardLink &&
			!controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER) {

			if err := k8s.AddFinalizerAndPatch(r.KubeClient, desiredAdb, ADB_FINALIZER); err != nil {
				return emptyResult, fmt.Errorf("Failed to add finalizer to Autonomous Database "+desiredAdb.Name+": %w", err)
			}
		} else if desiredAdb.Spec.HardLink != nil &&
			!*desiredAdb.Spec.HardLink &&
			controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER) {

			if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, desiredAdb, ADB_FINALIZER); err != nil {
				return emptyResult, fmt.Errorf("Failed to remove finalizer to Autonomous Database "+desiredAdb.Name+": %w", err)
			}
		}
	} else {
		// The Autonomous Database is being deleted
		if controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER) {
			if dbv4.IsAdbIntermediateState(desiredAdb.Status.LifecycleState) {
				// No-op
			} else if desiredAdb.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminated {
				// The Autonomous Database in OCI has been deleted. Remove the finalizer.
				if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, desiredAdb, ADB_FINALIZER); err != nil {
					return emptyResult, fmt.Errorf("Failed to remove finalizer to Autonomous Database "+desiredAdb.Name+": %w", err)
				}
			} else {
				// Remove the Autonomous Database in OCI.
				// Change the action to Terminate and proceed with the rest of the reconcile logic
				desiredAdb.Spec.Action = "Terminate"
			}
		}
	}

	if !dbv4.IsAdbIntermediateState(desiredAdb.Status.LifecycleState) {
		/******************************************************************
		 * Perform operations
		 ******************************************************************/
		var specChangedAfterOperation bool
		specChangedAfterOperation, err = r.performOperation(logger, desiredAdb)
		if err != nil {
			return r.manageError(
				logger.WithName("performOperation"),
				desiredAdb,
				fmt.Errorf("Failed to operate database action: %w", err))
		}

		if specChangedAfterOperation {
			specChanged = true
		}

		/******************************************************************
		 * Sync AutonomousDatabase Backups from OCI.
		 * The backups will not be synced when the lifecycle state is
		 * TERMINATING or TERMINATED.
		 ******************************************************************/
		if desiredAdb.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateTerminating &&
			desiredAdb.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateTerminated {
			if err := r.syncBackupResources(logger, desiredAdb); err != nil {
				return r.manageError(logger.WithName("syncBackupResources"), desiredAdb, err)
			}
		}

		/*****************************************************
		 * Validate Wallet
		 *****************************************************/
		if err := r.validateWallet(logger, desiredAdb); err != nil {
			return r.manageError(
				logger.WithName("validateWallet"),
				desiredAdb,
				fmt.Errorf("Failed to validate Wallet: %w", err))
		}
	}

	/******************************************************************
	 * Update the Autonomous Database at the end of every reconcile.
	 ******************************************************************/
	if specChanged {
		if err := r.KubeClient.Update(context.TODO(), desiredAdb); err != nil {
			return r.manageError(
				logger.WithName("updateSpec"),
				desiredAdb,
				fmt.Errorf("Failed to update AutonomousDatabase spec: %w", err))
		}
		// Immediately exit the reconcile loop if the resource is updated, and let
		// the next run continue.
		return emptyResult, nil
	}

	updateCondition(desiredAdb, nil)
	if err := r.KubeClient.Status().Update(context.TODO(), desiredAdb); err != nil {
		return r.manageError(
			logger,
			desiredAdb,
			fmt.Errorf("Failed to update AutonomousDatabase status: %w", err))
	}

	/******************************************************************
	 * Requeue the request in the following cases:
	 * 1. the ADB is in intermediate state
	 * 2. the ADB is terminated, but the finalizer is not yet removed.
	 ******************************************************************/
	if dbv4.IsAdbIntermediateState(desiredAdb.Status.LifecycleState) {
		logger.
			WithName("IsAdbIntermediateState").
			Info("LifecycleState is " + string(desiredAdb.Status.LifecycleState) + "; reconciliation queued")
		return requeueResult, nil
	} else {
		logger.Info("AutonomousDatabase reconciles successfully")
		return emptyResult, nil
	}
}

// setupOCIClients builds an OCI API-key auth provider from the ConfigMap and
// Secret referenced by the resource's OciConfig, then initializes r.dbService.
func (r *AutonomousDatabaseReconciler) setupOCIClients(logger logr.Logger, adb *dbv4.AutonomousDatabase) error {
	var err error

	authData := oci.ApiKeyAuth{
		ConfigMapName: adb.Spec.OciConfig.ConfigMapName,
		SecretName:    adb.Spec.OciConfig.SecretName,
		Namespace:     adb.GetNamespace(),
	}

	provider, err := oci.GetOciProvider(r.KubeClient, authData)
	if err != nil {
		return err
	}

	r.dbService, err = oci.NewDatabaseService(logger, r.KubeClient, provider)
	if err != nil {
		return err
	}

	return nil
}

// Updates the status with the error and returns an empty result.
// Note: the original error is logged and recorded in a condition, but is NOT
// returned to the caller, so the request is not requeued for OCI-side errors.
func (r *AutonomousDatabaseReconciler) manageError(logger logr.Logger, adb *dbv4.AutonomousDatabase, err error) (ctrl.Result, error) {
	l := logger.WithName("manageError")

	l.Error(err, "Error occured")

	updateCondition(adb, err)
	if err := r.KubeClient.Status().Update(context.TODO(), adb); err != nil {
		return emptyResult, fmt.Errorf("Failed to update status: %w", err)
	}
	return emptyResult, nil
}

const CONDITION_TYPE_AVAILABLE = "Available"
const CONDITION_REASON_AVAILABLE = "Available"
const CONDITION_TYPE_RECONCILE_QUEUED = "ReconcileQueued"
const CONDITION_REASON_RECONCILE_QUEUED = "LastReconcileQueued"

// NOTE(review): "ReconfileError" looks like a typo for "ReconcileError", but the
// value is part of the published status API — confirm impact before renaming.
const CONDITION_TYPE_RECONCILE_ERROR = "ReconfileError"
const CONDITION_REASON_RECONCILE_ERROR = "LastReconcileError"

// updateCondition clears any previously recorded conditions and records a
// single condition describing the outcome of this reconcile:
// ReconcileError (false) on error, ReconcileQueued (true) while the ADB is in
// an intermediate state, otherwise Available (true).
func updateCondition(adb *dbv4.AutonomousDatabase, err error) {
	var condition metav1.Condition
	var errMsg string

	if err != nil {
		errMsg = err.Error()
	}

	// Clean up the Conditions array
	if len(adb.Status.Conditions) > 0 {
		var allConditions = []string{
			CONDITION_TYPE_AVAILABLE,
			CONDITION_TYPE_RECONCILE_QUEUED,
			CONDITION_TYPE_RECONCILE_ERROR}

		for _, conditionType := range allConditions {
			meta.RemoveStatusCondition(&adb.Status.Conditions, conditionType)
		}
	}

	// If error occurs, the condition status will be marked as false and the error message will still be listed
	// If the ADB lifecycleState is intermediate, then condition status will be marked as true
	// Otherwise, then condition status will be marked as true if no error occurs
	if err != nil {
		condition = metav1.Condition{
			Type:               CONDITION_TYPE_RECONCILE_ERROR,
			LastTransitionTime: metav1.Now(),
			ObservedGeneration: adb.GetGeneration(),
			Reason:             CONDITION_REASON_RECONCILE_ERROR,
			Message:            errMsg,
			Status:             metav1.ConditionFalse,
		}
	} else if dbv4.IsAdbIntermediateState(adb.Status.LifecycleState) {
		condition = metav1.Condition{
			Type:               CONDITION_TYPE_RECONCILE_QUEUED,
			LastTransitionTime: metav1.Now(),
			ObservedGeneration: adb.GetGeneration(),
			Reason:             CONDITION_REASON_RECONCILE_QUEUED,
			Message:            "no reconcile errors",
			Status:             metav1.ConditionTrue,
		}
	} else {
		condition = metav1.Condition{
			Type:               CONDITION_TYPE_AVAILABLE,
			LastTransitionTime: metav1.Now(),
			ObservedGeneration: adb.GetGeneration(),
			Reason:             CONDITION_REASON_AVAILABLE,
			Message:            "no reconcile errors",
			Status:             metav1.ConditionTrue,
		}
	}

	meta.SetStatusCondition(&adb.Status.Conditions, condition)
}
if setStateResp != nil { - var lifecycleState database.AutonomousDatabaseLifecycleStateEnum - var opcWorkRequestID *string + l := logger.WithName("validateOperation") - if startResp, isStartResponse := setStateResp.(database.StartAutonomousDatabaseResponse); isStartResponse { - lifecycleState = startResp.AutonomousDatabase.LifecycleState - opcWorkRequestID = startResp.OpcWorkRequestId + switch adb.Spec.Action { + case "Create": + l.Info("Create operation") + err := r.createAutonomousDatabase(logger, adb) + if err != nil { + return false, err + } - } else if stopResp, isStopResponse := setStateResp.(database.StopAutonomousDatabaseResponse); isStopResponse { - lifecycleState = stopResp.AutonomousDatabase.LifecycleState - opcWorkRequestID = stopResp.OpcWorkRequestId + adb.Spec.Action = "" + return true, nil - } else if deleteResp, isDeleteResponse := setStateResp.(database.DeleteAutonomousDatabaseResponse); isDeleteResponse { - // Special case. Delete response doen't contain lifecycle State - lifecycleState = database.AutonomousDatabaseLifecycleStateTerminating - opcWorkRequestID = deleteResp.OpcWorkRequestId + case "Sync": + l.Info("Sync operation") + _, err = r.syncAutonomousDatabase(logger, adb, true) + if err != nil { + return false, err + } - } else { - r.currentLogger.Error(err, "Unknown response type") + adb.Spec.Action = "" + return true, nil - // Change the status to UNAVAILABLE - adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateUnavailable - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } - return ctrl.Result{}, nil - } + case "Update": + l.Info("Update operation") + err = r.updateAutonomousDatabase(logger, adb) + if err != nil { + return false, err + } - // Update status.state - adb.Status.LifecycleState = lifecycleState - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } + adb.Spec.Action = "" + return true, nil - 
if err := oci.WaitUntilWorkCompleted(r.currentLogger, workClient, opcWorkRequestID); err != nil { - r.currentLogger.Error(err, "Fail to watch the status of work request. opcWorkRequestID = "+*opcWorkRequestID) + case "Stop": + l.Info("Sending StopAutonomousDatabase request to OCI") - // Change the status to UNAVAILABLE - adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateUnavailable - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } - } - r.currentLogger.Info(fmt.Sprintf("Set AutonomousDatabase %s lifecycle state to %s successfully\n", - *adb.Spec.Details.DbName, - adb.Spec.Details.LifecycleState)) - } + resp, err := r.dbService.StopAutonomousDatabase(*adb.Spec.Details.Id) + if err != nil { + return false, err + } - // Update the database in OCI from the local resource. - // The local resource will be synchronized again later. - updateGenPassResp, err := oci.UpdateGeneralAndPasswordAttributes(r.currentLogger, r.KubeClient, dbClient, secretClient, adb) - if err != nil { - r.currentLogger.Error(err, "Fail to update Autonomous Database") + adb.Spec.Action = "" + adb.Status.LifecycleState = resp.LifecycleState + return true, nil - // Change the status to UNAVAILABLE - adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateUnavailable - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } - // The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue - return ctrl.Result{}, nil - } + case "Start": + l.Info("Sending StartAutonomousDatabase request to OCI") - if updateGenPassResp.OpcWorkRequestId != nil { - // Update status.state - adb.Status.LifecycleState = updateGenPassResp.AutonomousDatabase.LifecycleState - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } + resp, err := 
r.dbService.StartAutonomousDatabase(*adb.Spec.Details.Id) + if err != nil { + return false, err + } - if err := oci.WaitUntilWorkCompleted(r.currentLogger, workClient, updateGenPassResp.OpcWorkRequestId); err != nil { - r.currentLogger.Error(err, "Fail to watch the status of work request. opcWorkRequestID = "+*updateGenPassResp.OpcWorkRequestId) + adb.Spec.Action = "" + adb.Status.LifecycleState = resp.LifecycleState + return true, nil - // Change the status to UNAVAILABLE - adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateUnavailable - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } - } - r.currentLogger.Info("Update AutonomousDatabase " + *adb.Spec.Details.DbName + " succesfully") - } + case "Terminate": + // OCI only allows terminate operation when the ADB is in an valid state, otherwise requeue the reconcile. + if dbv4.CanBeTerminated(adb.Status.LifecycleState) { + l.Info("Sending DeleteAutonomousDatabase request to OCI") - scaleResp, err := oci.UpdateScaleAttributes(r.currentLogger, r.KubeClient, dbClient, adb) + _, err := r.dbService.DeleteAutonomousDatabase(*adb.Spec.Details.Id) if err != nil { - r.currentLogger.Error(err, "Fail to update Autonomous Database") + return false, err + } - // Change the status to UNAVAILABLE - adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateUnavailable - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } - // The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue - return ctrl.Result{}, nil + if err := r.removeBackupResources(l, adb); err != nil { + return false, err } - if scaleResp.OpcWorkRequestId != nil { - // Update status.state - adb.Status.LifecycleState = scaleResp.AutonomousDatabase.LifecycleState - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, 
statusErr - } + adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateTerminating + } else if dbv4.IsAdbIntermediateState(adb.Status.LifecycleState) { + l.Info("Can not terminate an ADB in an intermediate state; exit reconcile") + } - if err := oci.WaitUntilWorkCompleted(r.currentLogger, workClient, scaleResp.OpcWorkRequestId); err != nil { - r.currentLogger.Error(err, "Fail to watch the status of work request. opcWorkRequestID = "+*updateGenPassResp.OpcWorkRequestId) + adb.Spec.Action = "" + return true, nil - // Change the status to UNAVAILABLE - adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateUnavailable - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return ctrl.Result{}, statusErr - } - } - r.currentLogger.Info("Scale AutonomousDatabase " + *adb.Spec.Details.DbName + " succesfully") - } + case "Clone": + resp, err := r.dbService.CreateAutonomousDatabaseClone(adb) + if err != nil { + return false, err + } + adb.Status.LifecycleState = resp.LifecycleState + + adb.Spec.Action = "" + + // Create cloned Autonomous Database resource + clonedAdb := &dbv4.AutonomousDatabase{ + ObjectMeta: metav1.ObjectMeta{ + Name: *adb.Spec.Clone.DisplayName, + Namespace: adb.Namespace, + }, + Spec: dbv4.AutonomousDatabaseSpec{ + OciConfig: *adb.Spec.OciConfig.DeepCopy(), + }, } + clonedAdb.UpdateFromOciAdb(resp.AutonomousDatabase, true) + if err := r.KubeClient.Create(context.TODO(), clonedAdb); err != nil { + return false, err + } + return true, nil + + case "": + // No-op + return false, nil + default: + adb.Spec.Action = "" + return true, errors.New("Unknown action: " + adb.Spec.Action) + } +} + +func (r *AutonomousDatabaseReconciler) createAutonomousDatabase(logger logr.Logger, adb *dbv4.AutonomousDatabase) error { + logger.WithName("createADB").Info("Sending CreateAutonomousDatabase request to OCI") + resp, err := r.dbService.CreateAutonomousDatabase(adb) + if err != nil { + return err } + 
adb.UpdateFromOciAdb(resp.AutonomousDatabase, true) + + return nil +} + +// syncAutonomousDatabase retrieve the information of AutonomousDatabase from +// OCI and "overwrite" decides whether the spec and the status of "adb" will +// be overwritten. +// It will be a no-op if "Spec.Details.AutonomousDatabaseOCID" of the provided +// AutonomousDatabase is nil. +// This method does not update the actual resource in the cluster. +// +// The returned values are: +// 1. bool: indicates whether the spec is changed after the sync +// 2. error: not nil if an error occurs during the sync +func (r *AutonomousDatabaseReconciler) syncAutonomousDatabase( + logger logr.Logger, + adb *dbv4.AutonomousDatabase, overwrite bool) (specChanged bool, err error) { + if adb.Spec.Details.Id == nil { + return false, nil + } + + l := logger.WithName("syncAutonomousDatabase") + // Get the information from OCI - updatedADB, err := oci.GetAutonomousDatabaseResource(r.currentLogger, dbClient, adb) + l.Info("Sending GetAutonomousDatabase request to OCI") + resp, err := r.dbService.GetAutonomousDatabase(*adb.Spec.Details.Id) + if err != nil { + return false, err + } + + specChanged = adb.UpdateFromOciAdb(resp.AutonomousDatabase, overwrite) + return specChanged, nil +} + +// updateAutonomousDatabase returns true if an OCI request is sent. +// The AutonomousDatabase is updated with the returned object from the OCI requests. 
func (r *AutonomousDatabaseReconciler) updateAutonomousDatabase(
	logger logr.Logger,
	adb *dbv4.AutonomousDatabase) (err error) {

	// Get OCI AutonomousDatabase and update the lifecycleState of the CR,
	// so that the validatexx functions know when the state changes back to AVAILABLE
	ociAdb := adb.DeepCopy()
	_, err = r.syncAutonomousDatabase(logger, ociAdb, true)
	if err != nil {
		return err
	}

	// Start update
	// difAdb is used to store ONLY the values of the Autonomous Database that
	// differ from the one in OCI
	difAdb := adb.DeepCopy()

	detailsAreChanged, err := difAdb.RemoveUnchangedDetails(ociAdb.Spec)
	if err != nil {
		return err
	}

	// Do the update request only if the current ADB is actually different from the OCI ADB
	if detailsAreChanged {
		logger.Info("Sending UpdateAutonomousDatabase request to OCI")

		resp, err := r.dbService.UpdateAutonomousDatabase(*adb.Spec.Details.Id, difAdb)
		if err != nil {
			return err
		}
		// Result intentionally discarded: the next reconcile re-syncs the spec.
		_ = adb.UpdateFromOciAdb(resp.AutonomousDatabase, true)
	}

	return nil
}

// validateWallet downloads the instance wallet from OCI (unless a matching
// wallet Secret already exists, or the ADB is still provisioning) and stores
// it in a Secret named either spec.Wallet.Name or "<adb-name>-instance-wallet".
func (r *AutonomousDatabaseReconciler) validateWallet(logger logr.Logger, adb *dbv4.AutonomousDatabase) error {
	// Nothing to do when no wallet name or wallet password source is configured.
	if adb.Spec.Wallet.Name == nil &&
		adb.Spec.Wallet.Password.K8sSecret.Name == nil &&
		adb.Spec.Wallet.Password.OciSecret.Id == nil {
		return nil
	}

	// The wallet cannot be downloaded while the database is still provisioning.
	if adb.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateProvisioning {
		return nil
	}

	l := logger.WithName("validateWallet")

	// lastSucSpec may be nil if this is the first time entering the reconciliation loop
	var walletName string

	if adb.Spec.Wallet.Name == nil {
		walletName = adb.GetName() + "-instance-wallet"
	} else {
		walletName = *adb.Spec.Wallet.Name
	}

	secret, err := k8s.FetchSecret(r.KubeClient, adb.GetNamespace(), walletName)
	if err == nil {
		val, ok := secret.Labels["app"]
		if !ok || val != adb.Name {
			// NOTE(review): the comment said "Overwrite if the fetched secret has a
			// different label", but this branch only logs and falls through to the
			// same "return nil" — no overwrite happens. Confirm intended behavior.
			l.Info("wallet existed but has a different label; skip the download")
		}
		// No-op if Wallet is already downloaded
		return nil
	} else if !apiErrors.IsNotFound(err) {
		return err
	}

	resp, err := r.dbService.DownloadWallet(adb)
	if err != nil {
		return err
	}

	data, err := oci.ExtractWallet(resp.Content)
	if err != nil {
		return err
	}

	adb.Status.WalletExpiringDate = oci.WalletExpiringDate(data)

	// Label the Secret so the check above can recognize it on later reconciles.
	label := map[string]string{"app": adb.GetName()}

	if err := k8s.CreateSecret(r.KubeClient, adb.Namespace, walletName, data, adb, label); err != nil {
		return err
	}

	l.Info(fmt.Sprintf("Wallet is stored in the Secret %s", walletName))

	return nil
}

// syncBackupResources gets the list of AutonomousDatabaseBackups in OCI and
// creates a backup resource for each one not found in the same namespace.
func (r *AutonomousDatabaseReconciler) syncBackupResources(logger logr.Logger, adb *dbv4.AutonomousDatabase) error {
	l := logger.WithName("syncBackupResources")

	// Get the list of AutonomousDatabaseBackupOCID in the same namespace
	backupList, err := k8s.FetchAutonomousDatabaseBackups(r.KubeClient, adb.Namespace, adb.Name)
	if err != nil {
		return err
	}

	curBackupNames := make(map[string]bool)
	curBackupOCIDs := make(map[string]bool)

	for _, backup := range backupList.Items {
		// mark the backup name that exists
		curBackupNames[backup.Name] = true

		// mark the backup ocid that exists
		if backup.Spec.AutonomousDatabaseBackupOCID != nil {
			curBackupOCIDs[*backup.Spec.AutonomousDatabaseBackupOCID] = true
		}
	}

	resp, err := r.dbService.ListAutonomousDatabaseBackups(*adb.Spec.Details.Id)
	if err != nil {
		return err
	}

	for _, backupSummary := range resp.Items {
		// Create the resource if the backup doesn't exist
		if !r.ifBackupExists(backupSummary, curBackupOCIDs, backupList) {
			validBackupName, err := r.getValidBackupName(*backupSummary.DisplayName, curBackupNames)
			if err != nil {
				return err
			}

			if err := k8s.CreateAutonomousBackup(r.KubeClient, validBackupName, backupSummary, adb); err != nil {
				return err
			}

			// Add the used name and ocid
			curBackupNames[validBackupName] = true
			curBackupOCIDs[*backupSummary.AutonomousDatabaseId] = true

			l.Info("Create AutonomousDatabaseBackup " + validBackupName)
		}
	}

	return nil
}

// getValidBackupName derives a Kubernetes-safe, unique resource name from an
// OCI backup display name, appending "-<n>" until it does not collide with a
// name already present in usedNames.
func (r *AutonomousDatabaseReconciler) getValidBackupName(displayName string, usedNames map[string]bool) (string, error) {
	// Convert the displayName to lowercase, and replace every character that is
	// not a letter, digit, or hyphen with a hyphen.
	baseName := strings.ToLower(displayName)

	// NOTE(review): consider a package-level regexp.MustCompile — this helper is
	// called once per missing backup, and the pattern is constant.
	re, err := regexp.Compile(`[^-a-zA-Z0-9]`)
	if err != nil {
		return "", err
	}

	baseName = re.ReplaceAllString(baseName, "-")

	finalName := baseName
	var i = 1
	_, ok := usedNames[finalName]
	for ok {
		finalName = fmt.Sprintf("%s-%d", baseName, i)
		_, ok = usedNames[finalName]
		i++
	}

	return finalName, nil
}
- // payload := []patchValue{{ - // Op: "replace", - // Path: "/spec/details", - // Value: adb.Spec.Details, - // }} - // payloadBytes, err := json.Marshal(payload) - // if err != nil { - // return err - // } - - // patch := client.RawPatch(types.JSONPatchType, payloadBytes) - // if err := r.KubeClient.Patch(context.TODO(), adb, patch); err != nil { - // return err - // } - - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - curADB := &dbv1alpha1.AutonomousDatabase{} - - namespacedName := types.NamespacedName{ - Namespace: adb.GetNamespace(), - Name: adb.GetName(), - } +func (r *AutonomousDatabaseReconciler) ifBackupExists(backupSummary database.AutonomousDatabaseBackupSummary, curBackupOCIDs map[string]bool, backupList *dbv4.AutonomousDatabaseBackupList) bool { + _, ok := curBackupOCIDs[*backupSummary.Id] + if ok { + return true + } - if err := r.KubeClient.Get(context.TODO(), namespacedName, curADB); err != nil { - return err + // Special case: when a Backup is creating and hasn't updated the OCID, a duplicated Backup might be created by mistake. + // To handle this case, skip creating the AutonomousDatabaseBackup resource if the current backupSummary is with CREATING state, + // and there is another AutonomousBackup with the same displayName in the cluster is also at CREATING state. 
+ if backupSummary.LifecycleState == database.AutonomousDatabaseBackupSummaryLifecycleStateCreating { + for _, backup := range backupList.Items { + if (backup.Spec.DisplayName != nil && *backup.Spec.DisplayName == *backupSummary.DisplayName) && + (backup.Status.LifecycleState == "" || + backup.Status.LifecycleState == database.AutonomousDatabaseBackupLifecycleStateCreating) { + return true + } } + } - curADB.Spec.Details = adb.Spec.Details - return r.KubeClient.Update(context.TODO(), curADB) - }); err != nil { + return false +} + +// removeBackupResources remove all the AutonomousDatabasBackups that +// are associated with the adb +func (r *AutonomousDatabaseReconciler) removeBackupResources(logger logr.Logger, adb *dbv4.AutonomousDatabase) error { + l := logger.WithName("removeBackupResources") + + // Get the list of AutonomousDatabaseBackupOCID in the same namespace + backupList, err := k8s.FetchAutonomousDatabaseBackups(r.KubeClient, adb.Namespace, adb.Name) + if err != nil { return err } - // Update status - if statusErr := adbutil.SetStatus(r.KubeClient, adb); statusErr != nil { - return statusErr + for _, backup := range backupList.Items { + if err := r.KubeClient.Delete(context.TODO(), &backup); err != nil { + return err + } + l.Info("Delete AutonomousDatabaseBackup " + backup.Name) } - r.currentLogger.Info("Update local resource AutonomousDatabase successfully") return nil } diff --git a/controllers/database/autonomousdatabasebackup_controller.go b/controllers/database/autonomousdatabasebackup_controller.go new file mode 100644 index 00000000..9744f3fb --- /dev/null +++ b/controllers/database/autonomousdatabasebackup_controller.go @@ -0,0 +1,267 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "errors" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + adbfamily "github.com/oracle/oracle-database-operator/commons/adb_family" + "github.com/oracle/oracle-database-operator/commons/oci" +) + +// AutonomousDatabaseBackupReconciler reconciles a AutonomousDatabaseBackup object +type AutonomousDatabaseBackupReconciler struct { + KubeClient client.Client + Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + + dbService oci.DatabaseService +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AutonomousDatabaseBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbv4.AutonomousDatabaseBackup{}). + WithEventFilter(predicate.GenerationChangedPredicate{}). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). // ReconcileHandler is never invoked concurrently with the same object. 
+	Complete(r)
+}
+
+//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabasebackups,verbs=get;list;watch;create;delete
+//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabasebackups/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabases,verbs=get;list
+
+func (r *AutonomousDatabaseBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := r.Log.WithValues("Namespace/Name", req.NamespacedName)
+
+	backup := &dbv4.AutonomousDatabaseBackup{}
+	if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, backup); err != nil {
+		// Ignore not-found errors, since they can't be fixed by an immediate requeue.
+		// No need to change the status, since we don't know if we obtained the object.
+		if apiErrors.IsNotFound(err) {
+			return emptyResult, nil
+		}
+		// Failed to get AutonomousDatabaseBackup, so we don't need to update the status
+		return emptyResult, err
+	}
+
+	/******************************************************************
+	 * Look up the owner AutonomousDatabase and set the ownerReference
+	 * if the owner hasn't been set yet.
+	 ******************************************************************/
+	adbOCID, err := r.verifyTargetAdb(backup)
+	if err != nil {
+		return r.manageError(backup, err)
+	}
+
+	/******************************************************************
+	 * Get OCI database client
+	 ******************************************************************/
+	if err := r.setupOCIClients(backup); err != nil {
+		return r.manageError(backup, err)
+	}
+
+	logger.Info("OCI clients configured succesfully")
+
+	/******************************************************************
+	 * Get status from OCI AutonomousDatabaseBackup
+	 ******************************************************************/
+	if backup.Spec.AutonomousDatabaseBackupOCID != nil {
+		backupResp, err := r.dbService.GetAutonomousDatabaseBackup(*backup.Spec.AutonomousDatabaseBackupOCID)
+		if err != nil {
+			return r.manageError(backup, err)
+		}
+
+		adbResp, err := r.dbService.GetAutonomousDatabase(*backupResp.AutonomousDatabaseId)
+		if err != nil {
+			return r.manageError(backup, err)
+		}
+
+		backup.UpdateStatusFromOCIBackup(backupResp.AutonomousDatabaseBackup, adbResp.AutonomousDatabase)
+	}
+
+	/******************************************************************
+	 * Requeue if the Backup is in an intermediate state
+	 * No-op if the Autonomous Database OCID is nil
+	 * To get the latest status, execute before all the reconcile logic
+	 ******************************************************************/
+	if dbv4.IsBackupIntermediateState(backup.Status.LifecycleState) {
+		logger.WithName("IsIntermediateState").Info("Current lifecycleState is " + string(backup.Status.LifecycleState) + "; reconcile queued")
+		return requeueResult, nil
+	}
+
+	/******************************************************************
+	 * Create a new backup if the spec.autonomousDatabaseBackupOCID is empty.
+	 * Otherwise, bind to an existing backup.
+	 ******************************************************************/
+	if backup.Spec.AutonomousDatabaseBackupOCID == nil {
+		// Create a new backup
+		logger.Info("Sending CreateAutonomousDatabaseBackup request to OCI")
+		backupResp, err := r.dbService.CreateAutonomousDatabaseBackup(backup, adbOCID)
+		if err != nil {
+			return r.manageError(backup, err)
+		}
+
+		// After the creation, update the status first
+		adbResp, err := r.dbService.GetAutonomousDatabase(*backupResp.AutonomousDatabaseId)
+		if err != nil {
+			return r.manageError(backup, err)
+		}
+
+		backup.UpdateStatusFromOCIBackup(backupResp.AutonomousDatabaseBackup, adbResp.AutonomousDatabase)
+		if err := r.KubeClient.Status().Update(context.TODO(), backup); err != nil {
+			return r.manageError(backup, err)
+		}
+
+		// Then update the OCID
+		backup.Spec.AutonomousDatabaseBackupOCID = backupResp.Id
+		backup.UpdateStatusFromOCIBackup(backupResp.AutonomousDatabaseBackup, adbResp.AutonomousDatabase)
+
+		if err := r.KubeClient.Update(context.TODO(), backup); err != nil {
+			// Do not requeue, otherwise it will create multiple backups
+			r.Recorder.Event(backup, corev1.EventTypeWarning, "ReconcileFailed", err.Error())
+			logger.Error(err, "cannot update AutonomousDatabaseBackupOCID; stop reconcile", "AutonomousDatabaseBackupOCID", *backupResp.Id)
+
+			return emptyResult, nil
+		}
+
+		logger.Info("AutonomousDatabaseBackupOCID updated")
+		return emptyResult, nil
+	}
+
+	/******************************************************************
+	 * Update the status and requeue if it's in an intermediate state
+	 ******************************************************************/
+	if err := r.KubeClient.Status().Update(context.TODO(), backup); err != nil {
+		return r.manageError(backup, err)
+	}
+
+	if dbv4.IsBackupIntermediateState(backup.Status.LifecycleState) {
+		logger.WithName("IsIntermediateState").Info("Reconcile queued")
+		return requeueResult, nil
+	}
+
+	logger.Info("AutonomousDatabaseBackup reconciles successfully")
+
return emptyResult, nil +} + +// setOwnerAutonomousDatabase sets the owner of the AutonomousDatabaseBackup if the AutonomousDatabase resource with the same database OCID is found +func (r *AutonomousDatabaseBackupReconciler) setOwnerAutonomousDatabase(backup *dbv4.AutonomousDatabaseBackup, adb *dbv4.AutonomousDatabase) error { + logger := r.Log.WithName("set-owner-reference") + + controllerutil.SetOwnerReference(adb, backup, r.Scheme) + if err := r.KubeClient.Update(context.TODO(), backup); err != nil { + return err + } + logger.Info(fmt.Sprintf("Set the owner of AutonomousDatabaseBackup %s to AutonomousDatabase %s", backup.Name, adb.Name)) + + return nil +} + +// verifyTargetAdb searches if the target AutonomousDatabase is in the cluster, and set the owner reference to that AutonomousDatabase if it exists. +// The function returns the OCID of the target AutonomousDatabase. +func (r *AutonomousDatabaseBackupReconciler) verifyTargetAdb(backup *dbv4.AutonomousDatabaseBackup) (string, error) { + // Get the target ADB OCID and the ADB resource + ownerAdb, err := adbfamily.VerifyTargetAdb(r.KubeClient, backup.Spec.Target, backup.Namespace) + + if err != nil { + return "", err + } + + // Set the owner reference if needed + if len(backup.GetOwnerReferences()) == 0 && ownerAdb != nil { + if err := r.setOwnerAutonomousDatabase(backup, ownerAdb); err != nil { + return "", err + } + } + + if backup.Spec.Target.OciAdb.OCID != nil { + return *backup.Spec.Target.OciAdb.OCID, nil + } + if ownerAdb != nil && ownerAdb.Spec.Details.Id != nil { + return *ownerAdb.Spec.Details.Id, nil + } + + return "", errors.New("cannot get the OCID of the target AutonomousDatabase") +} + +func (r *AutonomousDatabaseBackupReconciler) setupOCIClients(backup *dbv4.AutonomousDatabaseBackup) error { + var err error + + authData := oci.ApiKeyAuth{ + ConfigMapName: backup.Spec.OCIConfig.ConfigMapName, + SecretName: backup.Spec.OCIConfig.SecretName, + Namespace: backup.GetNamespace(), + } + + provider, err 
:= oci.GetOciProvider(r.KubeClient, authData) + if err != nil { + return err + } + + r.dbService, err = oci.NewDatabaseService(r.Log, r.KubeClient, provider) + if err != nil { + return err + } + + return nil +} + +func (r *AutonomousDatabaseBackupReconciler) manageError(backup *dbv4.AutonomousDatabaseBackup, issue error) (ctrl.Result, error) { + // Send event + r.Recorder.Event(backup, corev1.EventTypeWarning, "ReconcileFailed", issue.Error()) + + return emptyResult, issue +} diff --git a/controllers/database/autonomousdatabaserestore_controller.go b/controllers/database/autonomousdatabaserestore_controller.go new file mode 100644 index 00000000..61b84c5d --- /dev/null +++ b/controllers/database/autonomousdatabaserestore_controller.go @@ -0,0 +1,282 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "context" + "errors" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/oracle/oci-go-sdk/v65/common" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + adbfamily "github.com/oracle/oracle-database-operator/commons/adb_family" + "github.com/oracle/oracle-database-operator/commons/k8s" + "github.com/oracle/oracle-database-operator/commons/oci" +) + +// AutonomousDatabaseRestoreReconciler reconciles a AutonomousDatabaseRestore object +type AutonomousDatabaseRestoreReconciler struct { + KubeClient client.Client + Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + + dbService oci.DatabaseService + workService oci.WorkRequestService +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *AutonomousDatabaseRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbv4.AutonomousDatabaseRestore{}). + WithEventFilter(predicate.GenerationChangedPredicate{}). + Complete(r) +} + +//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabaserestores,verbs=get;list;watch;create;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabaserestores/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=autonomousdatabases,verbs=get;list + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the AutonomousDatabaseRestore object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile +func (r *AutonomousDatabaseRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Log.WithValues("Namespace/Name", req.NamespacedName) + + restore := &dbv4.AutonomousDatabaseRestore{} + if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, restore); err != nil { + // Ignore not-found errors, since they can't be fixed by an immediate requeue. + // No need to change since we don't know if we obtain the object. + if apiErrors.IsNotFound(err) { + return emptyResult, nil + } + // Failed to get the resource + return emptyResult, err + } + + /****************************************************************** + * Look up the owner AutonomousDatabase and set the ownerReference + * if the owner hasn't been set yet. 
+ ******************************************************************/ + adbOCID, err := r.verifyTargetAdb(restore) + if err != nil { + return r.manageError(restore, err) + } + + /****************************************************************** + * Get OCI database client and work request client + ******************************************************************/ + if err := r.setupOCIClients(restore); err != nil { + return r.manageError(restore, err) + } + + logger.Info("OCI clients configured succesfully") + + /****************************************************************** + * Start the restore or update the status + ******************************************************************/ + if restore.Status.WorkRequestOCID == "" { + logger.Info("Start restoring the database") + // Extract the restoreTime from the spec + restoreTime, err := r.getRestoreSDKTime(restore) + if err != nil { + return r.manageError(restore, err) + } + + logger.Info("Sending RestoreAutonomousDatabase request to OCI") + adbResp, err := r.dbService.RestoreAutonomousDatabase(adbOCID, *restoreTime) + if err != nil { + return r.manageError(restore, err) + } + + // Update the restore status + workResp, err := r.workService.Get(*adbResp.OpcWorkRequestId) + if err != nil { + return r.manageError(restore, err) + } + + restore.UpdateStatus(adbResp.AutonomousDatabase, workResp) + if err := r.KubeClient.Status().Update(context.TODO(), restore); err != nil { + return r.manageError(restore, err) + } + + } else { + // Update the status + logger.Info("Update the status of the restore session") + adbResp, err := r.dbService.GetAutonomousDatabase(adbOCID) + if err != nil { + return r.manageError(restore, err) + } + + workResp, err := r.workService.Get(restore.Status.WorkRequestOCID) + if err != nil { + return r.manageError(restore, err) + } + + restore.UpdateStatus(adbResp.AutonomousDatabase, workResp) + if err := r.KubeClient.Status().Update(context.TODO(), restore); err != nil { + return 
r.manageError(restore, err) + } + } + + // Requeue if it's in intermediate state + if dbv4.IsRestoreIntermediateState(restore.Status.Status) { + logger.WithName("IsIntermediateState").Info("Current status is " + string(restore.Status.Status) + "; reconcile queued") + return requeueResult, nil + } + + logger.Info("AutonomousDatabaseRestore reconciles successfully") + + return emptyResult, nil +} + +func (r *AutonomousDatabaseRestoreReconciler) getRestoreSDKTime(restore *dbv4.AutonomousDatabaseRestore) (*common.SDKTime, error) { + if restore.Spec.Source.K8sAdbBackup.Name != nil { // restore using backupName + backup := &dbv4.AutonomousDatabaseBackup{} + if err := k8s.FetchResource(r.KubeClient, restore.Namespace, *restore.Spec.Source.K8sAdbBackup.Name, backup); err != nil { + return nil, err + } + + if backup.Status.TimeEnded == "" { + return nil, errors.New("broken backup: ended time is missing from the AutonomousDatabaseBackup " + backup.GetName()) + } + restoreTime, err := backup.GetTimeEnded() + if err != nil { + return nil, err + } + + return restoreTime, nil + + } else { // PIT restore + // The validation of the pitr.timestamp has been handled by the webhook, so the error return is ignored + restoreTime, _ := restore.GetPIT() + return restoreTime, nil + } +} + +// setOwnerAutonomousDatabase sets the owner of the AutonomousDatabaseBackup if the AutonomousDatabase resource with the same database OCID is found +func (r *AutonomousDatabaseRestoreReconciler) setOwnerAutonomousDatabase(restore *dbv4.AutonomousDatabaseRestore, adb *dbv4.AutonomousDatabase) error { + logger := r.Log.WithName("set-owner-reference") + + controllerutil.SetOwnerReference(adb, restore, r.Scheme) + if err := r.KubeClient.Update(context.TODO(), restore); err != nil { + return err + } + logger.Info(fmt.Sprintf("Set the owner of AutonomousDatabaseRestore %s to AutonomousDatabase %s", restore.Name, adb.Name)) + + return nil +} + +// verifyTargetAdb searches if the target ADB is in the cluster, 
and set the owner reference to the ADB if it exists. +// The function returns the OCID of the target ADB. +func (r *AutonomousDatabaseRestoreReconciler) verifyTargetAdb(restore *dbv4.AutonomousDatabaseRestore) (string, error) { + // Get the target ADB OCID and the ADB resource + ownerAdb, err := adbfamily.VerifyTargetAdb(r.KubeClient, restore.Spec.Target, restore.Namespace) + + if err != nil { + return "", err + } + + // Set the owner reference if needed + if len(restore.GetOwnerReferences()) == 0 && ownerAdb != nil { + if err := r.setOwnerAutonomousDatabase(restore, ownerAdb); err != nil { + return "", err + } + } + + if restore.Spec.Target.OciAdb.OCID != nil { + return *restore.Spec.Target.OciAdb.OCID, nil + } + if ownerAdb != nil && ownerAdb.Spec.Details.Id != nil { + return *ownerAdb.Spec.Details.Id, nil + } + + return "", errors.New("cannot get the OCID of the target Autonomous Database") +} + +func (r *AutonomousDatabaseRestoreReconciler) setupOCIClients(restore *dbv4.AutonomousDatabaseRestore) error { + var err error + + authData := oci.ApiKeyAuth{ + ConfigMapName: restore.Spec.OCIConfig.ConfigMapName, + SecretName: restore.Spec.OCIConfig.SecretName, + Namespace: restore.GetNamespace(), + } + + provider, err := oci.GetOciProvider(r.KubeClient, authData) + if err != nil { + return err + } + + r.dbService, err = oci.NewDatabaseService(r.Log, r.KubeClient, provider) + if err != nil { + return err + } + + r.workService, err = oci.NewWorkRequestService(r.Log, r.KubeClient, provider) + if err != nil { + return err + } + + return nil +} + +// manageError doesn't return the error so that the request won't be requeued +func (r *AutonomousDatabaseRestoreReconciler) manageError(restore *dbv4.AutonomousDatabaseRestore, issue error) (ctrl.Result, error) { + // Send event + r.Recorder.Event(restore, corev1.EventTypeWarning, "ReconcileFailed", issue.Error()) + + return emptyResult, issue +} diff --git a/controllers/database/cdb_controller.go 
b/controllers/database/cdb_controller.go new file mode 100644 index 00000000..6c5fc747 --- /dev/null +++ b/controllers/database/cdb_controller.go @@ -0,0 +1,1093 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + //"fmt" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" +) + +// CDBReconciler reconciles a CDB object +type CDBReconciler struct { + client.Client + Scheme *runtime.Scheme + Config *rest.Config + Log logr.Logger + Interval time.Duration + Recorder record.EventRecorder +} + +var ( + cdbPhaseInit = "Initializing" + cdbPhasePod = "CreatingPod" + cdbPhaseValPod = "ValidatingPods" + cdbPhaseService = "CreatingService" + cdbPhaseSecrets = "DeletingSecrets" + cdbPhaseReady = "Ready" + cdbPhaseDelete = "Deleting" + cdbPhaseFail = "Failed" +) + +const CDBFinalizer = "database.oracle.com/CDBfinalizer" + +//+kubebuilder:rbac:groups=database.oracle.com,resources=cdbs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=cdbs/status,verbs=get;update;patch 
+//+kubebuilder:rbac:groups=database.oracle.com,resources=cdbs/finalizers,verbs=update +//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;services;configmaps;events;replicasets,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups=core,resources=pods;secrets;services;configmaps;namespaces,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the CDB object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile +func (r *CDBReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("multitenantoperator", req.NamespacedName) + log.Info("Reconcile requested") + + reconcilePeriod := r.Interval * time.Second + requeueY := ctrl.Result{Requeue: true, RequeueAfter: reconcilePeriod} + requeueN := ctrl.Result{} + + var err error + cdb := &dbapi.CDB{} + + // Execute for every reconcile + defer func() { + log.Info("DEFER", "Name", cdb.Name, "Phase", cdb.Status.Phase, "Status", strconv.FormatBool(cdb.Status.Status)) + if !cdb.Status.Status { + if err := r.Status().Update(ctx, cdb); err != nil { + log.Error(err, "Failed to update status for :"+cdb.Name, "err", err.Error()) + } + } + }() + + err = r.Client.Get(context.TODO(), req.NamespacedName, cdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("CDB Resource Not found", "Name", cdb.Name) + // Request object not found, could have been deleted after reconcile req. 
+ // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + cdb.Status.Status = true + return requeueN, nil + } + // Error reading the object - requeue the req. + return requeueY, err + } + + log.Info("Res Status:", "Name", cdb.Name, "Phase", cdb.Status.Phase, "Status", strconv.FormatBool(cdb.Status.Status)) + + // Finalizer section + err = r.manageCDBDeletion(ctx, req, cdb) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + + // If post-creation, CDB spec is changed, check and take appropriate action + if (cdb.Status.Phase == cdbPhaseReady) && cdb.Status.Status { + r.evaluateSpecChange(ctx, req, cdb) + } + + if !cdb.Status.Status { + phase := cdb.Status.Phase + log.Info("Current Phase:"+phase, "Name", cdb.Name) + + switch phase { + case cdbPhaseInit: + err = r.verifySecrets(ctx, req, cdb) + if err != nil { + cdb.Status.Phase = cdbPhaseFail + return requeueN, nil + } + cdb.Status.Phase = cdbPhasePod + case cdbPhasePod: + // Create ORDS PODs + err = r.createORDSInstances(ctx, req, cdb) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + cdb.Status.Phase = cdbPhaseValPod + case cdbPhaseValPod: + // Validate ORDS PODs + err = r.validateORDSPods(ctx, req, cdb) + if err != nil { + if cdb.Status.Phase == cdbPhaseFail { + return requeueN, nil + } + log.Info("Reconcile queued") + return requeueY, nil + } + cdb.Status.Phase = cdbPhaseService + case cdbPhaseService: + // Create ORDS Service + err = r.createORDSSVC(ctx, req, cdb) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + //cdb.Status.Phase = cdbPhaseSecrets + cdb.Status.Phase = cdbPhaseReady + case cdbPhaseSecrets: + // Delete CDB Secrets + //r.deleteSecrets(ctx, req, cdb) + cdb.Status.Phase = cdbPhaseReady + cdb.Status.Msg = "Success" + case cdbPhaseReady: + cdb.Status.Status = true + r.Status().Update(ctx, cdb) + return requeueN, nil + default: + cdb.Status.Phase = 
cdbPhaseInit + log.Info("DEFAULT:", "Name", cdb.Name, "Phase", phase, "Status", strconv.FormatBool(cdb.Status.Status)) + } + + if err := r.Status().Update(ctx, cdb); err != nil { + log.Error(err, "Failed to update status for :"+cdb.Name, "err", err.Error()) + } + return requeueY, nil + } + + log.Info("Reconcile completed") + return requeueN, nil +} + +/* +********************************************************* + - Create a ReplicaSet for pods based on the ORDS container + /******************************************************* +*/ +func (r *CDBReconciler) createORDSInstances(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error { + + log := r.Log.WithValues("createORDSInstances", req.NamespacedName) + + replicaSet := r.createReplicaSetSpec(cdb) + + foundRS := &appsv1.ReplicaSet{} + err := r.Get(context.TODO(), types.NamespacedName{Name: replicaSet.Name, Namespace: cdb.Namespace}, foundRS) + if err != nil && apierrors.IsNotFound(err) { + log.Info("Creating ORDS Replicaset: " + replicaSet.Name) + err = r.Create(ctx, replicaSet) + if err != nil { + log.Error(err, "Failed to create ReplicaSet for :"+cdb.Name, "Namespace", replicaSet.Namespace, "Name", replicaSet.Name) + return err + } + } else if err != nil { + log.Error(err, "Replicaset : "+replicaSet.Name+" already exists.") + return err + } + + // Set CDB instance as the owner and controller + ctrl.SetControllerReference(cdb, replicaSet, r.Scheme) + + log.Info("Created ORDS ReplicaSet successfully") + r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "CreatedORDSReplicaSet", "Created ORDS Replicaset (Replicas - %s) for %s", strconv.Itoa(cdb.Spec.Replicas), cdb.Name) + return nil +} + +/* +************************************************ + - Validate ORDS Pod. 
Check if there are any errors
 /***********************************************
*/

// validateORDSPods checks that the ORDS pods backing this CDB are running and
// answering the ORDS metadata-catalog endpoint. It returns an error while the
// expected number of ready pods has not been reached (so the reconciler keeps
// requeueing); on a detected DB-connection problem it marks the CDB Failed
// and deletes the ReplicaSet.
func (r *CDBReconciler) validateORDSPods(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {

	log := r.Log.WithValues("validateORDSPod", req.NamespacedName)

	log.Info("Validating Pod creation for :" + cdb.Name)

	podName := cdb.Name + "-ords"
	podList := &corev1.PodList{}
	listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingLabels{"name": podName}}

	// List retrieves list of objects for a given namespace and list options.
	err := r.List(ctx, podList, listOpts...)
	if err != nil {
		log.Info("Failed to list pods of: "+podName, "Namespace", req.Namespace)
		return err
	}

	if len(podList.Items) == 0 {
		log.Info("No pods found for: "+podName, "Namespace", req.Namespace)
		cdb.Status.Msg = "Waiting for ORDS Pod(s) to start"
		return errors.New("Waiting for ORDS pods to start")
	}

	// Probe ORDS over https first, then http.
	/* /opt/oracle/ords/secrets/$TLSKEY /opt/oracle/ords/secrets/$TLSCRT */
	getORDSStatus := " curl --cert /opt/oracle/ords/secrets/tls.crt --key /opt/oracle/ords/secrets/tls.key -sSkv -k -X GET https://localhost:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/stable/metadata-catalog/ || curl --cert /opt/oracle/ords/secrets/tls.crt --key /opt/oracle/ords/secrets/tls.key -sSkv -X GET http://localhost:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/stable/metadata-catalog/ "
	readyPods := 0
	for _, pod := range podList.Items {
		if pod.Status.Phase != corev1.PodRunning {
			continue
		}
		// Get ORDS Status. curl writes its verbose protocol trace to stderr,
		// so the interesting text may arrive via out or via the exec error.
		out, err := dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "", ctx, req, false, "bash", "-c", getORDSStatus)
		// BUGFIX: ExecCommand may return a nil error; the original called
		// err.Error() unconditionally, which would panic on success.
		errUpper := ""
		if err != nil {
			errUpper = strings.ToUpper(err.Error())
		}
		if strings.Contains(out, "HTTP/1.1 200 OK") || strings.Contains(errUpper, "HTTP/1.1 200 OK") ||
			strings.Contains(out, "HTTP/2") || strings.Contains(errUpper, " HTTP/2") {
			readyPods++
		} else if strings.Contains(out, "HTTP/1.1 404 Not Found") ||
			strings.Contains(errUpper, "HTTP/1.1 404 NOT FOUND") ||
			strings.Contains(errUpper, "HTTP/2 404") ||
			// BUGFIX: the needle must be upper-cased to ever match the
			// upper-cased haystack; the original mixed-case literal could
			// never match.
			strings.Contains(errUpper, "FAILED TO CONNECT TO LOCALHOST") {
			// Check if DB connection parameters are correct
			getORDSInstallStatus := " grep -q 'Failed to' /tmp/ords_install.log; echo $?;"
			out, _ := dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "", ctx, req, false, "bash", "-c", getORDSInstallStatus)
			if strings.TrimSpace(out) == "0" {
				// ORDS install log reports a failure: terminal condition.
				cdb.Status.Msg = "Check DB connection parameters"
				cdb.Status.Phase = cdbPhaseFail
				// Delete existing ReplicaSet
				r.deleteReplicaSet(ctx, req, cdb)
				return errors.New("Check DB connection parameters")
			}
		}
	}

	if readyPods != cdb.Spec.Replicas {
		log.Info("Replicas: "+strconv.Itoa(cdb.Spec.Replicas), "Ready Pods: ", readyPods)
		cdb.Status.Msg = "Waiting for ORDS Pod(s) to be ready"
		return errors.New("Waiting for ORDS pods to be ready")
	}

	cdb.Status.Msg = ""
	return nil
}

/*
***********************
 - Create Pod spec

/***********************
*/

// createPodSpec assembles the PodSpec for the ORDS container: a projected
// volume exposing the required secrets under /opt/oracle/ords/secrets, the
// environment wiring for the DB/ORDS connection, and the node selector and
// image pull settings from the CDB spec.
func (r *CDBReconciler) createPodSpec(cdb *dbapi.CDB) corev1.PodSpec {

	// Every secret is projected the same way: the secret key becomes the
	// file name under the mount path.
	secretSource := func(secretName string, key string) corev1.VolumeProjection {
		return corev1.VolumeProjection{
			Secret: &corev1.SecretProjection{
				LocalObjectReference: corev1.LocalObjectReference{
					Name: secretName,
				},
				Items: []corev1.KeyToPath{
					{
						Key:  key,
						Path: key,
					},
				},
			},
		}
	}

	podSpec := corev1.PodSpec{
		Volumes: []corev1.Volume{{
			Name: "secrets",
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					// NOTE(review): 0666 makes the projected files world-writable
					// inside the pod; consider 0600 — confirm before changing.
					DefaultMode: func() *int32 { i := int32(0666); return &i }(),
					Sources: []corev1.VolumeProjection{
						secretSource(cdb.Spec.SysAdminPwd.Secret.SecretName, cdb.Spec.SysAdminPwd.Secret.Key),
						secretSource(cdb.Spec.CDBAdminUser.Secret.SecretName, cdb.Spec.CDBAdminUser.Secret.Key),
						secretSource(cdb.Spec.CDBTlsKey.Secret.SecretName, cdb.Spec.CDBTlsKey.Secret.Key),
						secretSource(cdb.Spec.CDBTlsCrt.Secret.SecretName, cdb.Spec.CDBTlsCrt.Secret.Key),
						secretSource(cdb.Spec.CDBAdminPwd.Secret.SecretName, cdb.Spec.CDBAdminPwd.Secret.Key),
						secretSource(cdb.Spec.ORDSPwd.Secret.SecretName, cdb.Spec.ORDSPwd.Secret.Key),
						secretSource(cdb.Spec.WebServerUser.Secret.SecretName, cdb.Spec.WebServerUser.Secret.Key),
						secretSource(cdb.Spec.WebServerPwd.Secret.SecretName, cdb.Spec.WebServerPwd.Secret.Key),
					},
				},
			},
		}},
		Containers: []corev1.Container{{
			Name:  cdb.Name + "-ords",
			Image: cdb.Spec.ORDSImage,
			VolumeMounts: []corev1.VolumeMount{{
				MountPath: "/opt/oracle/ords/secrets",
				Name:      "secrets",
				ReadOnly:  true,
			}},
			Env: []corev1.EnvVar{
				{Name: "ORACLE_HOST", Value: cdb.Spec.DBServer},
				{Name: "DBTNSURL", Value: cdb.Spec.DBTnsurl},
				{Name: "TLSCRT", Value: cdb.Spec.CDBTlsCrt.Secret.Key},
				{Name: "TLSKEY", Value: cdb.Spec.CDBTlsKey.Secret.Key},
				{Name: "ORACLE_PORT", Value: strconv.Itoa(cdb.Spec.DBPort)},
				{Name: "ORDS_PORT", Value: strconv.Itoa(cdb.Spec.ORDSPort)},
				{Name: "ORACLE_SERVICE", Value: cdb.Spec.ServiceName},
				{Name: "ORACLE_PWD_KEY", Value: cdb.Spec.SysAdminPwd.Secret.Key},
				{Name: "CDBADMIN_USER_KEY", Value: cdb.Spec.CDBAdminUser.Secret.Key},
				{Name: "CDBADMIN_PWD_KEY", Value: cdb.Spec.CDBAdminPwd.Secret.Key},
				{Name: "ORDS_PWD_KEY", Value: cdb.Spec.ORDSPwd.Secret.Key},
				{Name: "WEBSERVER_USER_KEY", Value: cdb.Spec.WebServerUser.Secret.Key},
				{Name: "WEBSERVER_PASSWORD_KEY", Value: cdb.Spec.WebServerPwd.Secret.Key},
				{
					// Private key is injected as an env var straight from the secret.
					Name: "R1",
					ValueFrom: &corev1.EnvVarSource{
						SecretKeyRef: &corev1.SecretKeySelector{
							LocalObjectReference: corev1.LocalObjectReference{
								Name: cdb.Spec.CDBPriKey.Secret.SecretName,
							},
							Key: cdb.Spec.CDBPriKey.Secret.Key,
						},
					},
				},
			},
		}},

		// Copy the user-supplied node selector, if any (ranging a nil map is a no-op).
		NodeSelector: func() map[string]string {
			ns := make(map[string]string)
			for key, value := range cdb.Spec.NodeSelector {
				ns[key] = value
			}
			return ns
		}(),
	}

	if len(cdb.Spec.ORDSImagePullSecret) > 0 {
		podSpec.ImagePullSecrets = []corev1.LocalObjectReference{
			{
				Name: cdb.Spec.ORDSImagePullSecret,
			},
		}
	}

	// Default to Always; only an explicit "Never" (case-insensitive) overrides it.
	podSpec.Containers[0].ImagePullPolicy = corev1.PullAlways
	if strings.ToUpper(cdb.Spec.ORDSImagePullPolicy) == "NEVER" {
		podSpec.Containers[0].ImagePullPolicy = corev1.PullNever
	}

	return podSpec
}

/*
***********************
 - Create ReplicaSet spec

/***********************
*/
func (r *CDBReconciler) createReplicaSetSpec(cdb *dbapi.CDB)
*appsv1.ReplicaSet {

	replicas := int32(cdb.Spec.Replicas)
	podSpec := r.createPodSpec(cdb)

	// The ReplicaSet selector must match the pod template labels ("name": <cdb>-ords).
	replicaSet := &appsv1.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      cdb.Name + "-ords-rs",
			Namespace: cdb.Namespace,
			Labels: map[string]string{
				"name": cdb.Name + "-ords-rs",
			},
		},
		Spec: appsv1.ReplicaSetSpec{
			Replicas: &replicas,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:      cdb.Name + "-ords",
					Namespace: cdb.Namespace,
					Labels: map[string]string{
						"name": cdb.Name + "-ords",
					},
				},
				Spec: podSpec,
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"name": cdb.Name + "-ords",
				},
			},
		},
	}

	return replicaSet
}

/*
*********************************************************
 - Delete the ORDS ReplicaSet of this CDB
 /*******************************************************
*/
func (r *CDBReconciler) deleteReplicaSet(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {
	log := r.Log.WithValues("deleteReplicaSet", req.NamespacedName)

	k_client, err := kubernetes.NewForConfig(r.Config)
	if err != nil {
		log.Error(err, "Kubernetes Config Error")
		return err
	}

	replicaSetName := cdb.Name + "-ords-rs"
	err = k_client.AppsV1().ReplicaSets(cdb.Namespace).Delete(context.TODO(), replicaSetName, metav1.DeleteOptions{})
	if err != nil {
		log.Info("Could not delete ReplicaSet", "RS Name", replicaSetName, "err", err.Error())
		// A missing ReplicaSet is fine: the desired state is already reached.
		if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") {
			return err
		}
	} else {
		log.Info("Successfully deleted ORDS ReplicaSet", "RS Name", replicaSetName)
	}

	return nil
}

/*
*********************************************************
 - Evaluate change in Spec post creation and instantiation
 /*******************************************************
*/
func (r *CDBReconciler) evaluateSpecChange(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {
	log := r.Log.WithValues("evaluateSpecChange", req.NamespacedName)

	// List the Pods matching the PodTemplate Labels
	podName := cdb.Name + "-ords"
	podList := &corev1.PodList{}
	listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingLabels{"name": podName}}

	// List retrieves list of objects for a given namespace and list options.
	err := r.List(ctx, podList, listOpts...)
	if err != nil {
		log.Info("Failed to list pods of: "+podName, "Namespace", req.Namespace)
		return err
	}

	// BUGFIX: with no pods the original indexed Containers[0] on a zero-value
	// Pod and panicked. No pod means nothing to compare against yet.
	if len(podList.Items) == 0 {
		return nil
	}
	// Inspect any one live pod; all pods share the same template.
	foundPod := podList.Items[0]

	// Compare the connection-related env of the live pod against the spec.
	ordsSpecChange := false
	for _, envVar := range foundPod.Spec.Containers[0].Env {
		switch envVar.Name {
		case "ORACLE_HOST":
			ordsSpecChange = ordsSpecChange || envVar.Value != cdb.Spec.DBServer
		case "ORACLE_PORT":
			ordsSpecChange = ordsSpecChange || envVar.Value != strconv.Itoa(cdb.Spec.DBPort)
		case "ORDS_PORT":
			ordsSpecChange = ordsSpecChange || envVar.Value != strconv.Itoa(cdb.Spec.ORDSPort)
		case "ORACLE_SERVICE":
			ordsSpecChange = ordsSpecChange || envVar.Value != cdb.Spec.ServiceName
		}
	}

	if ordsSpecChange {
		// Connection details changed: delete the ReplicaSet and restart the
		// phase machine from Initializing so pods are recreated.
		err = r.deleteReplicaSet(ctx, req, cdb)
		if err != nil {
			return err
		}

		cdb.Status.Phase = cdbPhaseInit
		cdb.Status.Status = false
		r.Status().Update(ctx, cdb)
	} else {
		// Update the RS if the value of "replicas" is changed
		replicaSetName := cdb.Name + "-ords-rs"

		foundRS := &appsv1.ReplicaSet{}
		err := r.Get(context.TODO(), types.NamespacedName{Name: replicaSetName, Namespace: cdb.Namespace}, foundRS)
		if err != nil {
			log.Error(err, "Unable to get ORDS Replicaset: "+replicaSetName)
			return err
		}

		// Check if number of replicas have changed
		replicas := int32(cdb.Spec.Replicas)
		if cdb.Spec.Replicas != int(*(foundRS.Spec.Replicas)) {
			log.Info("Existing Replicas: " + strconv.Itoa(int(*(foundRS.Spec.Replicas))) + ", New Replicas: " + strconv.Itoa(cdb.Spec.Replicas))
			foundRS.Spec.Replicas = &replicas
			err = r.Update(ctx, foundRS)
			if err != nil {
				log.Error(err, "Failed to update ReplicaSet for :"+cdb.Name, "Namespace", cdb.Namespace, "Name", replicaSetName)
				return err
			}
			// Re-validate the pods after scaling.
			cdb.Status.Phase = cdbPhaseValPod
			cdb.Status.Status = false
			r.Status().Update(ctx, cdb)
		}
	}

	return nil
}

/*
************************************************
 - Create a Cluster Service for ORDS CDB Pod
 /***********************************************
*/
func (r *CDBReconciler) createORDSSVC(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {

	log := r.Log.WithValues("createORDSSVC", req.NamespacedName)

	foundSvc := &corev1.Service{}
	err := r.Get(context.TODO(), types.NamespacedName{Name: cdb.Name + "-ords", Namespace: cdb.Namespace}, foundSvc)
	if err != nil && apierrors.IsNotFound(err) {
		svc := r.createSvcSpec(cdb)

		log.Info("Creating a new Cluster Service for: "+cdb.Name, "Svc.Namespace", svc.Namespace, "Service.Name", svc.Name)
		err := r.Create(ctx, svc)
		if err != nil {
			log.Error(err, "Failed to create new Cluster Service for: "+cdb.Name, "Svc.Namespace", svc.Namespace, "Service.Name", svc.Name)
			return err
		}

		log.Info("Created ORDS Cluster Service successfully")
		r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "CreatedORDSService", "Created ORDS Service for %s", cdb.Name)
	} else if err != nil {
		// BUGFIX: a non-NotFound lookup error is not "already exists" - surface it.
		log.Error(err, "Unable to look up ORDS Cluster Service for: "+cdb.Name)
		return err
	} else {
		log.Info("ORDS Cluster Service already exists")
	}

	return nil
}

/*
***********************
 - Create Service spec
 /***********************
*/
func (r *CDBReconciler) createSvcSpec(cdb *dbapi.CDB) *corev1.Service {

	svc := &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind: "Service",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cdb.Name + "-ords",
			Namespace: cdb.Namespace,
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{
				"name": cdb.Name + "-ords",
			},
			// Headless service: clients resolve the pod IPs directly.
			ClusterIP: corev1.ClusterIPNone,
		},
	}
	// Set CDB instance as the owner and
controller
	ctrl.SetControllerReference(cdb, svc, r.Scheme)
	return svc
}

/*
************************************************
 - Check CDB deletion

/***********************************************
*/

// manageCDBDeletion registers the CDB finalizer while the object is live and,
// once a deletion timestamp is set, drops dependent PDBs (cascade), removes
// the finalizer and tears down the ORDS ReplicaSet/Service.
func (r *CDBReconciler) manageCDBDeletion(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {
	log := r.Log.WithValues("manageCDBDeletion", req.NamespacedName)

	/* REGISTER FINALIZER */
	if cdb.ObjectMeta.DeletionTimestamp.IsZero() {
		if !controllerutil.ContainsFinalizer(cdb, CDBFinalizer) {
			controllerutil.AddFinalizer(cdb, CDBFinalizer)
			if err := r.Update(ctx, cdb); err != nil {
				return err
			}
		}

	} else {
		log.Info("cdb set to be deleted")
		cdb.Status.Phase = cdbPhaseDelete
		cdb.Status.Status = true
		r.Status().Update(ctx, cdb)

		if controllerutil.ContainsFinalizer(cdb, CDBFinalizer) {

			// Cascade-delete dependent PDBs before letting the CDB go away.
			if err := r.DeletePDBS(ctx, req, cdb); err != nil {
				log.Info("Cannot delete pdbs")
				return err
			}

			controllerutil.RemoveFinalizer(cdb, CDBFinalizer)
			if err := r.Update(ctx, cdb); err != nil {
				return err
			}
		}

		err := r.deleteCDBInstance(ctx, req, cdb)
		if err != nil {
			log.Info("Could not delete CDB Resource", "CDB Name", cdb.Spec.CDBName, "err", err.Error())
			return err
		}

	}
	return nil
}

/*
************************************************
 - Delete CDB Resource

/***********************************************
*/

// deleteCDBInstance removes the ORDS ReplicaSet and Service backing the CDB.
// "Not found" delete failures are tolerated; anything else is returned.
func (r *CDBReconciler) deleteCDBInstance(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {

	log := r.Log.WithValues("deleteCDBInstance", req.NamespacedName)

	k_client, err := kubernetes.NewForConfig(r.Config)
	if err != nil {
		log.Error(err, "Kubernetes Config Error")
		// BUGFIX: the original only logged here and carried on with a nil
		// client, which panics on the first API call below.
		return err
	}

	replicaSetName := cdb.Name + "-ords-rs"

	err = k_client.AppsV1().ReplicaSets(cdb.Namespace).Delete(context.TODO(), replicaSetName, metav1.DeleteOptions{})
	if err != nil {
		log.Info("Could not delete ReplicaSet", "RS Name", replicaSetName, "err", err.Error())
		if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") {
			return err
		}
	} else {
		log.Info("Successfully deleted ORDS ReplicaSet", "RS Name", replicaSetName)
	}

	r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "DeletedORDSReplicaSet", "Deleted ORDS ReplicaSet for %s", cdb.Name)

	svcName := cdb.Name + "-ords"

	err = k_client.CoreV1().Services(cdb.Namespace).Delete(context.TODO(), svcName, metav1.DeleteOptions{})
	if err != nil {
		log.Info("Could not delete Service", "Service Name", svcName, "err", err.Error())
		if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") {
			return err
		}
	} else {
		r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "DeletedORDSService", "Deleted ORDS Service for %s", cdb.Name)
		log.Info("Successfully deleted ORDS Service", "Service Name", svcName)
	}

	log.Info("Successfully deleted CDB resource", "CDB Name", cdb.Spec.CDBName)
	return nil
}

/*
************************************************
 - Verify every secret referenced by the spec

/***********************************************
*/
func (r *CDBReconciler) verifySecrets(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {

	log := r.Log.WithValues("verifySecrets", req.NamespacedName)

	// Check each referenced secret in order; the first missing one aborts the
	// phase (same order and short-circuit behavior as the original chain).
	secretNames := []string{
		cdb.Spec.SysAdminPwd.Secret.SecretName,
		cdb.Spec.CDBAdminUser.Secret.SecretName,
		cdb.Spec.CDBAdminPwd.Secret.SecretName,
		cdb.Spec.ORDSPwd.Secret.SecretName,
		cdb.Spec.WebServerUser.Secret.SecretName,
		cdb.Spec.WebServerPwd.Secret.SecretName,
		cdb.Spec.CDBPriKey.Secret.SecretName,
	}
	for _, name := range secretNames {
		if err := r.checkSecret(ctx, req, cdb, name); err != nil {
			return err
		}
	}

	cdb.Status.Msg = ""
	log.Info("Verified secrets successfully")
	return nil
}

/*
************************************************
 - Get Secret for a Secret Name

/***********************************************
*/
func (r *CDBReconciler) checkSecret(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB, secretName string) error {

	log := r.Log.WithValues("checkSecret", req.NamespacedName)

	secret := &corev1.Secret{}
	err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: cdb.Namespace}, secret)
	if err != nil {
		if apierrors.IsNotFound(err) {
			// Surface the missing secret in the CDB status for the user.
			log.Info("Secret not found:" + secretName)
			cdb.Status.Msg = "Secret not found:" + secretName
			return err
		}
		log.Error(err, "Unable to get the secret.")
		return err
	}

	return nil
}

/*
************************************************
 - Delete Secrets

/***********************************************
*/

// deleteSecrets best-effort deletes the secrets referenced by the CDB spec;
// lookup and delete failures are deliberately ignored.
func (r *CDBReconciler) deleteSecrets(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) {

	log := r.Log.WithValues("deleteSecrets", req.NamespacedName)

	log.Info("Deleting CDB secrets")
	secret := &corev1.Secret{}
	err := r.Get(ctx, types.NamespacedName{Name: cdb.Spec.SysAdminPwd.Secret.SecretName, Namespace: cdb.Namespace}, secret)
	if err == nil {
		err := r.Delete(ctx, secret)
		if err == nil {
			log.Info("Deleted the secret : " + cdb.Spec.SysAdminPwd.Secret.SecretName)
		}
	}

	err = r.Get(ctx, types.NamespacedName{Name: cdb.Spec.CDBAdminUser.Secret.SecretName, Namespace: cdb.Namespace}, secret)
	if err == nil {
		err := r.Delete(ctx, secret)
		if err == nil {
			log.Info("Deleted the secret : " + cdb.Spec.CDBAdminUser.Secret.SecretName)
		}
	}

	err = r.Get(ctx, types.NamespacedName{Name: cdb.Spec.CDBAdminPwd.Secret.SecretName, Namespace: cdb.Namespace}, secret)
	if err == nil {
		err := r.Delete(ctx, secret)
		if err == nil {
			log.Info("Deleted the secret : " + cdb.Spec.CDBAdminPwd.Secret.SecretName)
		}
	}

	err =
r.Get(ctx, types.NamespacedName{Name: cdb.Spec.ORDSPwd.Secret.SecretName, Namespace: cdb.Namespace}, secret)
	if err == nil {
		err := r.Delete(ctx, secret)
		if err == nil {
			log.Info("Deleted the secret : " + cdb.Spec.ORDSPwd.Secret.SecretName)
		}
	}

	err = r.Get(ctx, types.NamespacedName{Name: cdb.Spec.WebServerUser.Secret.SecretName, Namespace: cdb.Namespace}, secret)
	if err == nil {
		err := r.Delete(ctx, secret)
		if err == nil {
			log.Info("Deleted the secret : " + cdb.Spec.WebServerUser.Secret.SecretName)
		}
	}

	err = r.Get(ctx, types.NamespacedName{Name: cdb.Spec.WebServerPwd.Secret.SecretName, Namespace: cdb.Namespace}, secret)
	if err == nil {
		err := r.Delete(ctx, secret)
		if err == nil {
			log.Info("Deleted the secret : " + cdb.Spec.WebServerPwd.Secret.SecretName)
		}
	}
}

/* Delete cascade option */

// DeletePDBS implements the delete-cascade option: when Spec.DeletePDBCascade
// is set, every PDB resource bound to this CDB is closed (if not MOUNTED) and
// dropped including datafiles via the ORDS DB API, and the PDB custom
// resource itself is deleted.
func (r *CDBReconciler) DeletePDBS(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error {
	log := r.Log.WithValues("DeletePDBS", req.NamespacedName)

	/* =================== DELETE CASCADE ================ */
	if cdb.Spec.DeletePDBCascade {
		log.Info("DELETE PDB CASCADE OPTION")
		pdbList := &dbapi.PDBList{}
		listOpts := []client.ListOption{}
		err := r.List(ctx, pdbList, listOpts...)
		if err != nil {
			log.Info("Failed to get the list of pdbs")
		}

		var url string
		if err == nil {
			for _, pdbitem := range pdbList.Items {
				log.Info("pdbitem.Spec.CDBName : " + pdbitem.Spec.CDBName)
				log.Info("pdbitem.Spec.CDBNamespace: " + pdbitem.Spec.CDBNamespace)
				log.Info("cdb.Spec.CDBName : " + cdb.Spec.CDBName)
				log.Info("cdb.Namespace : " + cdb.Namespace)
				// Only PDBs bound to this CDB (by name and namespace) are affected.
				if pdbitem.Spec.CDBName != cdb.Spec.CDBName || pdbitem.Spec.CDBNamespace != cdb.Namespace {
					continue
				}
				fmt.Printf("DeletePDBS Call Delete function for %s %s\n", pdbitem.Name, pdbitem.Spec.PDBName)

				var objmap map[string]interface{} /* Used for the return payload */
				values := map[string]string{
					"state":        "CLOSE",
					"modifyOption": "IMMEDIATE",
					"getScript":    "FALSE",
				}

				//url := "https://" + pdbitem.Spec.CDBResName + "-cdb." + pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/database/pdbs/" + pdbitem.Spec.PDBName
				url = "https://" + pdbitem.Spec.CDBResName + "-ords." + pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbitem.Spec.PDBName + "/status"

				log.Info("callAPI(URL):" + url)
				log.Info("pdbitem.Status.OpenMode" + pdbitem.Status.OpenMode)

				if pdbitem.Status.OpenMode != "MOUNTED" {

					log.Info("Force pdb closure")
					respData, errapi := NewCallApi(r, ctx, req, &pdbitem, url, values, "POST")

					fmt.Printf("Debug NEWCALL:%s\n", respData)
					// BUGFIX: check the API error first and report/return THAT
					// error. The original logged and returned the outer 'err'
					// (nil in this branch) and dereferenced it with
					// err.Error(), which panics and swallows the failure.
					if errapi != nil {
						log.Error(errapi, "callAPI cannot close pdb "+pdbitem.Spec.PDBName, "err", errapi.Error())
						return errapi
					}
					if err := json.Unmarshal([]byte(respData), &objmap); err != nil {
						log.Error(err, "failed to get respData from callAPI", "err", err.Error())
						return err
					}

					r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "close pdb", "pdbname=%s", pdbitem.Spec.PDBName)
				}

				/* start dropping pdb */
				log.Info("Drop pluggable database")
				values = map[string]string{
					"action":    "INCLUDING",
					"getScript": "FALSE",
				}
				url = "https://" + pdbitem.Spec.CDBResName + "-ords." + pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbitem.Spec.PDBName + "/"
				respData, errapi := NewCallApi(r, ctx, req, &pdbitem, url, values, "DELETE")

				// BUGFIX: same as above - check and propagate errapi, not the outer err.
				if errapi != nil {
					log.Error(errapi, "callAPI cannot drop pdb "+pdbitem.Spec.PDBName, "err", errapi.Error())
					return errapi
				}
				if err := json.Unmarshal([]byte(respData), &objmap); err != nil {
					log.Error(err, "failed to get respData from callAPI", "err", err.Error())
					return err
				}
				r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "drop pdb", "pdbname=%s", pdbitem.Spec.PDBName)

				// Finally remove the PDB custom resource itself.
				err = r.Delete(context.Background(), &pdbitem, client.GracePeriodSeconds(0))
				if err != nil {
					log.Info("Could not delete PDB resource", "err", err.Error())
					return err
				}

			} /* end of loop */
		}

	}
	/* ================================================ */
	return nil
}

/*
*************************************************************
 - SetupWithManager sets up the controller with the Manager.
/************************************************************
*/
func (r *CDBReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&dbapi.CDB{}).
		Owns(&appsv1.ReplicaSet{}). //Watch for deleted RS owned by this controller
		WithEventFilter(predicate.Funcs{
			UpdateFunc: func(e event.UpdateEvent) bool {
				// Ignore updates to CR status in which case metadata.Generation does not change
				return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
			},
			DeleteFunc: func(e event.DeleteEvent) bool {
				// Evaluates to false if the object has been confirmed deleted.
				//return !e.DeleteStateUnknown
				return false
			},
		}).
		WithOptions(controller.Options{MaxConcurrentReconciles: 100}).
		Complete(r)
}

diff --git a/controllers/database/dbcssystem_controller.go b/controllers/database/dbcssystem_controller.go
new file mode 100644
index 00000000..1fd94dde
--- /dev/null
+++ b/controllers/database/dbcssystem_controller.go
@@ -0,0 +1,1485 @@
+/*
+** Copyright (c) 2022-2024 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "fmt" + "reflect" + "strings" + "time" + + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcsv1 "github.com/oracle/oracle-database-operator/commons/dbcssystem" + "github.com/oracle/oracle-database-operator/commons/finalizer" + "github.com/oracle/oracle-database-operator/commons/oci" + + "github.com/go-logr/logr" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/core" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/keymanagement" + "github.com/oracle/oci-go-sdk/v65/workrequests" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// DbcsSystemReconciler reconciles a DbcsSystem object +type DbcsSystemReconciler struct { + KubeClient client.Client + Scheme *runtime.Scheme + Logv1 logr.Logger + Logger logr.Logger + dbClient database.DatabaseClient + nwClient core.VirtualNetworkClient + wrClient workrequests.WorkRequestClient + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=database.oracle.com,resources=dbcssystems,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=database.oracle.com,resources=dbcssystems/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=database.oracle.com,resources=dbcssystems/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=configmaps;secrets;namespaces,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main 
kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the DbcsSystem object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile
func (r *DbcsSystemReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	r.Logger = log.FromContext(ctx)

	var err error
	resultNq := ctrl.Result{Requeue: false}
	resultQ := ctrl.Result{Requeue: true, RequeueAfter: 60 * time.Second}

	// Get the dbcs instance from the cluster
	dbcsInst := &databasev4.DbcsSystem{}
	r.Logger.Info("Reconciling DbSystemDetails", "name", req.NamespacedName)

	// FIX: use the request ctx (was context.TODO()) so the lookup honors
	// controller shutdown/cancellation.
	if err := r.KubeClient.Get(ctx, req.NamespacedName, dbcsInst); err != nil {
		if !errors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
		// FIX: the resource no longer exists. Previously reconciliation fell
		// through with a zero-value object, which failed OCI provider setup
		// and requeued forever. Stop reconciling instead.
		return ctrl.Result{}, nil
	}

	// Create oci-go-sdk client
	authData := oci.ApiKeyAuth{
		ConfigMapName: dbcsInst.Spec.OCIConfigMap,
		SecretName:    dbcsInst.Spec.OCISecret,
		Namespace:     dbcsInst.GetNamespace(),
	}
	provider, err := oci.GetOciProvider(r.KubeClient, authData)
	if err != nil {
		result := resultNq
		return result, err
	}

	r.dbClient, err = database.NewDatabaseClientWithConfigurationProvider(provider)
	if err != nil {
		result := resultNq
		return result, err
	}

	r.nwClient, err = core.NewVirtualNetworkClientWithConfigurationProvider(provider)
	if err != nil {
		result := resultNq
		return result, err
	}

	r.wrClient, err = workrequests.NewWorkRequestClientWithConfigurationProvider(provider)
	if err != nil {
		result := resultNq
		return result, err
	}
	r.Logger.Info("OCI provider configured successfully")

	/*
		Using Finalizer for object deletion
	*/

	if dbcsInst.ObjectMeta.DeletionTimestamp.IsZero() {
		// The object is not being deleted
		if dbcsInst.Spec.HardLink && !finalizer.HasFinalizer(dbcsInst) {
			finalizer.Register(r.KubeClient, dbcsInst)
			r.Logger.Info("Finalizer registered successfully.")
		} else if !dbcsInst.Spec.HardLink && finalizer.HasFinalizer(dbcsInst) {
			finalizer.Unregister(r.KubeClient, dbcsInst)
			r.Logger.Info("Finalizer unregistered successfully.")
		}
	} else {
		// The object is being deleted
		r.Logger.Info("Terminate DbcsSystem Database: " + dbcsInst.Spec.DbSystem.DisplayName)
		if err := dbcsv1.DeleteDbcsSystemSystem(r.dbClient, *dbcsInst.Spec.Id); err != nil {
			r.Logger.Error(err, "Fail to terminate DbcsSystem Instance")
			// Change the status to Failed
			if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Terminate, r.nwClient, r.wrClient); statusErr != nil {
				result := resultNq
				return result, err
			}
			// The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue
			result := resultNq
			return result, err
		}

		// Check if PDBConfig is defined
		pdbConfigs := dbcsInst.Spec.PdbConfigs
		for _, pdbConfig := range pdbConfigs {
			if pdbConfig.PdbName != nil {
				// Handle PDB deletion if PluggableDatabaseId is defined and isDelete is true
				if pdbConfig.IsDelete != nil && pdbConfig.PluggableDatabaseId != nil && *pdbConfig.IsDelete {
					// Call deletePluggableDatabase function
					dbSystemId := *dbcsInst.Spec.Id
					if err := r.deletePluggableDatabase(ctx, pdbConfig, dbSystemId); err != nil {
						result := resultNq
						return result, err
					}
					result := resultNq
					return result, err
				}
			}
		}
		// Remove the finalizer and update the object
		finalizer.Unregister(r.KubeClient, dbcsInst)
		r.Logger.Info("Finalizer unregistered successfully.")
		// Stop reconciliation as the item is being deleted
		result := resultNq
		return result, err
	}

	/*
		Determine whether it's a provision or bind operation
	*/
	lastSuccessfullSpec, err := dbcsInst.GetLastSuccessfulSpec()
	if err != nil {
		return ctrl.Result{}, err
	}
	lastSuccessfullKMSConfig, err := dbcsInst.GetLastSuccessfulKMSConfig()
	if err != nil {
		return ctrl.Result{}, err
	}
	lastSuccessfullKMSStatus, err := dbcsInst.GetLastSuccessfulKMSStatus()
	if err != nil {
		return ctrl.Result{}, err
	}

	if lastSuccessfullKMSConfig == nil && lastSuccessfullKMSStatus == nil {

		if dbcsInst.Spec.KMSConfig.KeyName != "" {

			kmsVaultClient, err := keymanagement.NewKmsVaultClientWithConfigurationProvider(provider)
			if err != nil {
				return ctrl.Result{}, err
			}

			// Determine the criteria to identify or locate the vault based on provided information
			// Example: Using displayName as a unique identifier (assumed to be unique in this context)
			displayName := dbcsInst.Spec.KMSConfig.VaultName

			// Check if a vault with the given displayName exists
			getVaultReq := keymanagement.ListVaultsRequest{
				CompartmentId: &dbcsInst.Spec.KMSConfig.CompartmentId, // Assuming compartment ID is known or provided
			}

			listResp, err := kmsVaultClient.ListVaults(ctx, getVaultReq)
			if err != nil {
				return ctrl.Result{}, fmt.Errorf("error listing vaults: %v", err)
			}

			var existingVaultId *string
			var existingVaultManagementEndpoint *string
			var kmsClient keymanagement.KmsManagementClient
			// Find the first active vault with matching displayName
			for _, vault := range listResp.Items {
				if vault.LifecycleState == keymanagement.VaultSummaryLifecycleStateActive && *vault.DisplayName == displayName {
					existingVaultId = vault.Id
					existingVaultManagementEndpoint = vault.ManagementEndpoint
					// Create KMS Management client
					kmsClient, err = keymanagement.NewKmsManagementClientWithConfigurationProvider(provider, *existingVaultManagementEndpoint)
					if err != nil {
						return ctrl.Result{}, err
					}
					break
				}
			}

			// If no active vault found, create a new one
			if existingVaultId == nil {

				// Create the KMS vault
				createResp, err := r.createKMSVault(ctx, &dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus)
				if err != nil {
					return ctrl.Result{}, fmt.Errorf("error creating vault: %v", err)
				}
				existingVaultId = createResp.Id
				// FIX: logr.Info takes a message followed by key/value pairs;
				// the original passed a bare *string as a dangling argument.
				r.Logger.Info("Created vault", "VaultId", *existingVaultId)
			} else {
				// Optionally, perform additional checks or operations if needed
				r.Logger.Info("Found existing active vault with displayName", "DisplayName", displayName, "VaultId", *existingVaultId)
				dbcsInst.Status.KMSDetailsStatus.VaultId = *existingVaultId
				dbcsInst.Status.KMSDetailsStatus.ManagementEndpoint = *existingVaultManagementEndpoint
			}
			if existingVaultId != nil {

				// Find the key ID based on compartmentID in the existing vault
				listKeysReq := keymanagement.ListKeysRequest{
					CompartmentId: &dbcsInst.Spec.KMSConfig.CompartmentId,
				}

				var keyId *string
				var keyName *string

				// Make a single request to list keys
				listKeysResp, err := kmsClient.ListKeys(ctx, listKeysReq)
				if err != nil {
					r.Logger.Error(err, "Error listing keys in existing vault")
					return ctrl.Result{}, err
				}

				// Iterate over the keys to find the desired key
				for _, key := range listKeysResp.Items {
					if key.DisplayName != nil && *key.DisplayName == dbcsInst.Spec.KMSConfig.KeyName {
						keyId = key.Id
						keyName = key.DisplayName
						dbcsInst.Status.KMSDetailsStatus.KeyId = *key.Id
						dbcsInst.Status.KMSDetailsStatus.KeyName = *key.DisplayName
						break
					}
				}

				if keyId == nil {
					r.Logger.Info("Master key not found in existing vault, creating new key")

					// Create the KMS key in the existing vault
					keyResponse, err := r.createKMSKey(ctx, &dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus)
					if err != nil {
						return ctrl.Result{}, err
					}

					// Update the DbSystem with the encryption key ID
					dbcsInst.Status.KMSDetailsStatus.KeyId = *keyResponse.Key.Id
					dbcsInst.Status.KMSDetailsStatus.KeyName = *keyResponse.Key.DisplayName
				} else {
					r.Logger.Info("Found existing master key in vault", "KeyName", dbcsInst.Spec.KMSConfig.KeyName, "KeyId", *keyId)

					// Update the DbSystem with the existing encryption key ID
					dbcsInst.Status.KMSDetailsStatus.KeyId = *keyId
					dbcsInst.Status.KMSDetailsStatus.KeyName = *keyName
				}
			} else {
				r.Logger.Info("Creating new vault")

				// Create the new vault
				vaultResponse, err := r.createKMSVault(ctx, &dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus)
				if err != nil {
					return ctrl.Result{}, err
				}
				dbcsInst.Status.KMSDetailsStatus.VaultId = *vaultResponse.Id
				dbcsInst.Status.KMSDetailsStatus.ManagementEndpoint = *vaultResponse.ManagementEndpoint
				// Create the KMS key in the newly created vault
				keyResponse, err := r.createKMSKey(ctx, &dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus)
				if err != nil {
					return ctrl.Result{}, err
				}

				// Update the DbSystem with the encryption key ID
				dbcsInst.Status.KMSDetailsStatus.KeyId = *keyResponse.Key.Id
				dbcsInst.Status.KMSDetailsStatus.KeyName = *keyResponse.Key.DisplayName
			}
		}
	}
	//debugging
	// lastSuccessfullSpec = nil
	// r.ensureDBSystemSpec(&dbcsInst.Spec.DbSystem)
	// Check if cloning is needed, debugging
	// *dbcsInst.Status.DbCloneStatus.Id = ""
	setupCloning := false
	// Check if SetupDBCloning is true and ensure one of the required fields is provided
	if dbcsInst.Spec.SetupDBCloning {
		// If SetupDBCloning is true, at least one of Id, DbBackupId, or DatabaseId must be non-nil
		if dbcsInst.Spec.Id == nil && dbcsInst.Spec.DbBackupId == nil && dbcsInst.Spec.DatabaseId == nil {
			// If none of the required fields are set, log an error and exit the function
			r.Logger.Error(err, "SetupDBCloning is defined but other necessary details (Id, DbBackupId, DatabaseId) are not present. Refer README.md file for instructions.")
			return ctrl.Result{}, nil
		}
		// If the condition is met, proceed with cloning setup
		setupCloning = true
	} else {
		// If SetupDBCloning is false, continue as usual without cloning
		setupCloning = false
	}

	var dbSystemId string
	// Executing DB Cloning Process, if defined. Do not repeat cloning again when Status has Id present.
	if setupCloning && dbcsInst.Status.DbCloneStatus.Id == nil {
		switch {

		case dbcsInst.Spec.SetupDBCloning && dbcsInst.Spec.DbBackupId != nil:
			dbSystemId, err = dbcsv1.CloneFromBackupAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient)
			if err != nil {
				r.Logger.Error(err, "Fail to clone db system from backup and get DbcsSystem System ID")
				if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
					return ctrl.Result{}, statusErr
				}

				return ctrl.Result{}, nil
			}
			r.Logger.Info("DB Cloning completed successfully from provided backup DB system")

		case dbcsInst.Spec.SetupDBCloning && dbcsInst.Spec.DatabaseId != nil:
			dbSystemId, err = dbcsv1.CloneFromDatabaseAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient)
			if err != nil {
				r.Logger.Error(err, "Fail to clone db system from DatabaseID provided")
				if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
					return ctrl.Result{}, statusErr
				}

				return ctrl.Result{}, nil
			}
			r.Logger.Info("DB Cloning completed successfully from provided databaseId")

		case dbcsInst.Spec.SetupDBCloning && dbcsInst.Spec.DbBackupId == nil && dbcsInst.Spec.DatabaseId == nil:
			dbSystemId, err = dbcsv1.CloneAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient)
			if err != nil {
				r.Logger.Error(err, "Fail to clone db system and get DbcsSystem System ID")
				if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
					return ctrl.Result{}, statusErr
				}
				return ctrl.Result{}, nil
			}
			r.Logger.Info("DB Cloning completed successfully from provided db system")
		}
	} else if !setupCloning {
		if dbcsInst.Spec.Id == nil && lastSuccessfullSpec == nil {
			// If no DbcsSystem ID specified, create a new DB System
			// ======================== Validate Specs ==============
			err = dbcsv1.ValidateSpex(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.Recorder)
			if err != nil {
				return ctrl.Result{}, err
			}
			r.Logger.Info("DbcsSystem DBSystem provisioning")
			dbcsID, err := dbcsv1.CreateAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient, &dbcsInst.Status.KMSDetailsStatus)
			if err != nil {
				r.Logger.Error(err, "Fail to provision and get DbcsSystem System ID")

				// Change the status to Failed
				if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
					return ctrl.Result{}, statusErr
				}
				// The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue
				return ctrl.Result{}, nil
			}

			assignDBCSID(dbcsInst, dbcsID)
			// Check if KMSConfig is specified
			kmsConfig := dbcsInst.Spec.KMSConfig
			if kmsConfig != (databasev4.KMSConfig{}) {
				// Check if KMSDetailsStatus is uninitialized (zero value)
				if dbcsInst.Spec.DbSystem.KMSConfig != dbcsInst.Spec.KMSConfig {
					dbcsInst.Spec.DbSystem.KMSConfig = dbcsInst.Spec.KMSConfig
				}
			}
			if err := dbcsv1.UpdateDbcsSystemId(r.KubeClient, dbcsInst); err != nil {
				// Change the status to Failed
				assignDBCSID(dbcsInst, dbcsID)
				if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
					return ctrl.Result{}, statusErr
				}
				return ctrl.Result{}, err
			}

			r.Logger.Info("DbcsSystem system provisioned successfully")
			assignDBCSID(dbcsInst, dbcsID)
			if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil {
				return ctrl.Result{}, err
			}
			assignDBCSID(dbcsInst, dbcsID)
		} else {
			if lastSuccessfullSpec == nil { // first time after creation of DB
				if err := dbcsv1.GetDbSystemId(r.Logger, r.dbClient, dbcsInst); err != nil {
					// Change the status to Failed
					if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
						return ctrl.Result{}, statusErr
					}
					return ctrl.Result{}, err
				}
				if err := dbcsv1.SetDBCSDatabaseLifecycleState(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient); err != nil {
					// Change the status to required state
					return ctrl.Result{}, err
				}

				dbSystemId := *dbcsInst.Spec.Id
				if err := dbcsv1.UpdateDbcsSystemId(r.KubeClient, dbcsInst); err != nil {
					// Change the status to Failed
					assignDBCSID(dbcsInst, dbSystemId)
					if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
						return ctrl.Result{}, statusErr
					}
					return ctrl.Result{}, err
				}

				r.Logger.Info("Sync information from remote DbcsSystem System successfully")

				dbSystemId = *dbcsInst.Spec.Id
				if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil {
					return ctrl.Result{}, err
				}
				assignDBCSID(dbcsInst, dbSystemId)
			} else {
				dbSystemId := ""
				if dbcsInst.Spec.Id == nil {
					dbcsInst.Spec.Id = lastSuccessfullSpec.Id
					dbSystemId = *dbcsInst.Spec.Id
				} else {
					dbSystemId = *dbcsInst.Spec.Id
				}
				//debugging
				// *dbcsInst.Spec.Id = "ocid1.dbsystem.oc1.iad.anuwcljsabf7htya55wz5vfil7ul3pkzpubnymp6zrp3fhgomv3fcdr2vtiq"
				compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, *dbcsInst.Spec.Id)
				if err != nil {
					fmt.Printf("Failed to get compartment ID: %v\n", err)
					return ctrl.Result{}, err
				}
				dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, *dbcsInst.Spec.Id)
				if err != nil {
					fmt.Printf("Failed to get DB Home ID: %v\n", err)
					return ctrl.Result{}, err
				}

				databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, *dbcsInst.Spec.Id, compartmentId, dbHomeId)
				if err != nil {
					fmt.Printf("Failed to get database IDs: %v\n", err)
					return ctrl.Result{}, err
				}
				err = r.getPluggableDatabaseDetails(ctx, dbcsInst, *dbcsInst.Spec.Id, databaseIds)
				if err != nil {
					fmt.Printf("Failed to get pluggable database details: %v\n", err)
					return ctrl.Result{}, err
				}

				if err := dbcsv1.UpdateDbcsSystemIdInst(r.Logger, r.dbClient, dbcsInst, r.KubeClient, r.nwClient, r.wrClient, databaseIds[0]); err != nil {
					r.Logger.Error(err, "Fail to update DbcsSystem Id")

					// Change the status to Failed
					if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
						return ctrl.Result{}, statusErr
					}
					// The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue
					return ctrl.Result{}, nil
				}
				if err := dbcsv1.SetDBCSDatabaseLifecycleState(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient); err != nil {
					// Change the status to required state
					return ctrl.Result{}, err
				}
				// Update Spec and Status
				result, err := r.updateSpecsAndStatus(ctx, dbcsInst, dbSystemId)
				if err != nil {
					return result, err
				}
			}
		}
	}

	// Update the Wallet Secret when the secret name is given
	//r.updateWalletSecret(dbcs)

	// Update the last successful spec
	if dbcsInst.Spec.Id != nil {
		dbSystemId = *dbcsInst.Spec.Id

		if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil {
			return ctrl.Result{}, err
		}
	} else if dbcsInst.Status.DbCloneStatus.Id != nil {
		dbSystemId = *dbcsInst.Status.DbCloneStatus.Id
	}
	//assignDBCSID(dbcsInst,dbcsI)
	// Change the phase to "Available"
	assignDBCSID(dbcsInst, dbSystemId)
	if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Available, r.nwClient, r.wrClient); statusErr != nil {
		return ctrl.Result{}, statusErr
	}

	r.Logger.Info("DBInst after assignment", "dbcsInst:->", dbcsInst)

	// Check if specified PDB exists or needs to be created
	exists, err := r.validatePDBExistence(dbcsInst)
	if err != nil {
		fmt.Printf("Failed to get PDB Details: %v\n", err)
		return ctrl.Result{}, err
	}
	if dbcsInst.Spec.PdbConfigs != nil {
		if !exists {
			for _, pdbConfig := range dbcsInst.Spec.PdbConfigs {
				if pdbConfig.PdbName != nil {
					// Get database details
					// Get DB Home ID by DB System ID
					// Get Compartment ID by DB System ID
					compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId)
					if err != nil {
						fmt.Printf("Failed to get compartment ID: %v\n", err)
						return ctrl.Result{}, err
					}
					dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, dbSystemId)
					if err != nil {
						fmt.Printf("Failed to get DB Home ID: %v\n", err)
						return ctrl.Result{}, err
					}
					databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, dbSystemId, compartmentId, dbHomeId)
					if err != nil {
						fmt.Printf("Failed to get database IDs: %v\n", err)
						return ctrl.Result{}, err
					}

					// Now you can use dbDetails to access database attributes
					r.Logger.Info("Database details fetched successfully", "DatabaseId", databaseIds)

					// Check if deletion is requested
					if pdbConfig.IsDelete != nil && *pdbConfig.IsDelete {
						// Call deletePluggableDatabase function
						if err := r.deletePluggableDatabase(ctx, pdbConfig, dbSystemId); err != nil {
							return ctrl.Result{}, err
						}
						// Continue to the next pdbConfig
						continue
					} else {
						// Call the method to create the pluggable database
						r.Logger.Info("Calling createPluggableDatabase", "ctx:->", ctx, "dbcsInst:->", dbcsInst, "databaseIds:->", databaseIds[0], "compartmentId:->", compartmentId)
						pdbId, err := r.createPluggableDatabase(ctx, dbcsInst, pdbConfig, databaseIds[0], compartmentId, dbSystemId)
						if err != nil {
							// Handle error if required
							return ctrl.Result{}, err
						}

						// Create or update the PDBConfigStatus in DbcsSystemStatus
						pdbConfigStatus := databasev4.PDBConfigStatus{
							PdbName:                       pdbConfig.PdbName,
							ShouldPdbAdminAccountBeLocked: pdbConfig.ShouldPdbAdminAccountBeLocked,
							PdbLifecycleState:             databasev4.Available,
							FreeformTags:                  pdbConfig.FreeformTags,
							PluggableDatabaseId:           &pdbId,
						}

						// Create a map to track existing PDBConfigStatus by PdbName
						pdbDetailsMap := make(map[string]databasev4.PDBConfigStatus)

						// Populate the map with existing PDBConfigStatus from dbcsInst.Status.PdbDetailsStatus
						for _, pdbDetails := range dbcsInst.Status.PdbDetailsStatus {
							for _, existingPdbConfig := range pdbDetails.PDBConfigStatus {
								pdbDetailsMap[*existingPdbConfig.PdbName] = existingPdbConfig
							}
						}

						// Update the map with the new or updated PDBConfigStatus
						pdbDetailsMap[*pdbConfig.PdbName] = pdbConfigStatus

						// Convert the map back to a slice of PDBDetailsStatus
						var updatedPdbDetailsStatus []databasev4.PDBDetailsStatus
						for _, pdbConfigStatus := range pdbDetailsMap {
							updatedPdbDetailsStatus = append(updatedPdbDetailsStatus, databasev4.PDBDetailsStatus{
								PDBConfigStatus: []databasev4.PDBConfigStatus{pdbConfigStatus},
							})
						}

						// Assign the updated slice to dbcsInst.Status.PdbDetailsStatus
						dbcsInst.Status.PdbDetailsStatus = updatedPdbDetailsStatus
						// Update the status in Kubernetes
						// Update the status subresource
						err = r.KubeClient.Status().Update(ctx, dbcsInst)
						if err != nil {
							r.Logger.Error(err, "Failed to update DB status")
							return reconcile.Result{}, err
						}

					}
				}
			}
		} else {
			r.Logger.Info("No change in PDB configurations or, already existed PDB Status.")
		}
	}
	// } else {
	// 	r.Logger.Info("No PDB configurations given.")
	// }
	// r.Logger.Info("DBInst after assignment", "dbcsInst:->", dbcsInst)
	// // Check if PDBConfig is defined and needs to be created or deleted
	pdbConfigs := dbcsInst.Spec.PdbConfigs
	if pdbConfigs != nil {
		for _, pdbConfig := range pdbConfigs {
			if pdbConfig.PdbName != nil {
				// Get database details
				// Get DB Home ID by DB System ID
				// Get Compartment ID by DB System ID
				compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId)
				if err != nil {
					fmt.Printf("Failed to get compartment ID: %v\n", err)
					return ctrl.Result{}, err
				}
				dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, dbSystemId)
				if err != nil {
					fmt.Printf("Failed to get DB Home ID: %v\n", err)
					return ctrl.Result{}, err
				}
				databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, dbSystemId, compartmentId, dbHomeId)
				if err != nil {
					fmt.Printf("Failed to get database IDs: %v\n", err)
					return ctrl.Result{}, err
				}

				// Now you can use dbDetails to access database attributes
				r.Logger.Info("Database details fetched successfully", "DatabaseId", databaseIds)

				// Check if deletion is requested
				if pdbConfig.IsDelete != nil && *pdbConfig.IsDelete {
					// Call deletePluggableDatabase function
					if err := r.deletePluggableDatabase(ctx, pdbConfig, dbSystemId); err != nil {
						return ctrl.Result{}, err
					}
					// Continue to the next pdbConfig
					continue
				} else {
					// Call the method to create the pluggable database
					r.Logger.Info("Calling createPluggableDatabase", "ctx:->", ctx, "dbcsInst:->", dbcsInst, "databaseIds:->", databaseIds[0], "compartmentId:->", compartmentId)
					_, err := r.createPluggableDatabase(ctx, dbcsInst, pdbConfig, databaseIds[0], compartmentId, dbSystemId)
					if err != nil {
						// Handle error if required
						return ctrl.Result{}, err
					}
				}
			}
		}
	}

	return resultQ, nil

}

// updateSpecsAndStatus persists Spec.Id and the in-memory Status of dbcsInst
// onto the latest server-side copy of the resource, retrying on conflicts.
func (r *DbcsSystemReconciler) updateSpecsAndStatus(ctx context.Context, dbcsInst *databasev4.DbcsSystem, dbSystemId string) (reconcile.Result, error) {

	// Retry mechanism for handling resource version conflicts
	retryErr :=
retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Fetch the latest version of the resource
		latestDbcsInst := &databasev4.DbcsSystem{}
		err := r.KubeClient.Get(ctx, types.NamespacedName{
			Name:      dbcsInst.Name,
			Namespace: dbcsInst.Namespace,
		}, latestDbcsInst)
		if err != nil {
			r.Logger.Error(err, "Failed to fetch the latest DB resource")
			return err
		}

		// Update the Spec subresource
		latestDbcsInst.Spec.Id = &dbSystemId
		err = r.KubeClient.Update(ctx, latestDbcsInst)
		if err != nil {
			r.Logger.Error(err, "Failed to update DB Spec")
			return err
		}

		// Update the Status subresource: copy every field of the in-memory
		// status onto the freshly fetched object via reflection so the
		// subsequent Status().Update does not clobber local changes.
		originalStatus := reflect.ValueOf(&dbcsInst.Status).Elem()
		latestStatus := reflect.ValueOf(&latestDbcsInst.Status).Elem()

		// Iterate over all fields in the Status struct and update them
		for i := 0; i < originalStatus.NumField(); i++ {
			fieldName := originalStatus.Type().Field(i).Name
			latestStatus.FieldByName(fieldName).Set(originalStatus.Field(i))
		}

		err = r.KubeClient.Status().Update(ctx, latestDbcsInst)
		if err != nil {
			r.Logger.Error(err, "Failed to update DB status")
			return err
		}

		return nil
	})

	if retryErr != nil {
		r.Logger.Error(retryErr, "Failed to update DB Spec and Status after retries")
		return reconcile.Result{}, retryErr
	}

	r.Logger.Info("Successfully updated Spec and Status")
	return reconcile.Result{}, nil
}

// getDbHomeIdByDbSystemID retrieves the DB Home ID associated with the given DB System ID
func (r *DbcsSystemReconciler) getDbHomeIdByDbSystemID(ctx context.Context, compartmentId, dbSystemId string) (string, error) {
	listRequest := database.ListDbHomesRequest{
		CompartmentId: &compartmentId,
		DbSystemId:    &dbSystemId,
	}

	listResponse, err := r.dbClient.ListDbHomes(ctx, listRequest)
	if err != nil {
		return "", fmt.Errorf("failed to list DB homes: %v", err)
	}

	if len(listResponse.Items) == 0 {
		return "", fmt.Errorf("no DB homes found for DB system ID: %s", dbSystemId)
	}

	// NOTE(review): only the first DB home is returned; assumes a single DB
	// home per DB system — confirm for multi-home configurations.
	return *listResponse.Items[0].Id, nil
}

// getCompartmentIDByDbSystemID returns the compartment OCID that owns the
// given DB system, as reported by the OCI GetDbSystem API.
func (r *DbcsSystemReconciler) getCompartmentIDByDbSystemID(ctx context.Context, dbSystemId string) (string, error) {
	// Construct the GetDbSystem request
	getRequest := database.GetDbSystemRequest{
		DbSystemId: &dbSystemId,
	}

	// Call GetDbSystem API using the existing dbClient
	getResponse, err := r.dbClient.GetDbSystem(ctx, getRequest)
	if err != nil {
		return "", fmt.Errorf("failed to get DB system details: %v", err)
	}

	// Extract the compartment ID from the DB system details
	compartmentId := *getResponse.DbSystem.CompartmentId

	return compartmentId, nil
}

// getDatabaseIDByDbSystemID lists all database OCIDs belonging to the given
// DB system / compartment / DB home combination.
func (r *DbcsSystemReconciler) getDatabaseIDByDbSystemID(ctx context.Context, dbSystemId, compartmentId, dbHomeId string) ([]string, error) {
	// Construct the ListDatabases request
	request := database.ListDatabasesRequest{
		SystemId:      &dbSystemId,
		CompartmentId: &compartmentId,
		DbHomeId:      &dbHomeId,
	}

	// Call ListDatabases API using the existing dbClient
	response, err := r.dbClient.ListDatabases(ctx, request)
	if err != nil {
		return nil, fmt.Errorf("failed to list databases: %v", err)
	}

	// Extract database IDs from the response
	var databaseIds []string
	for _, dbSummary := range response.Items {
		databaseIds = append(databaseIds, *dbSummary.Id)
	}

	return databaseIds, nil
}

// validatePDBExistence reports whether every PDB named in Spec.PdbConfigs is
// already recorded as AVAILABLE in Status.PdbDetailsStatus (no OCI calls).
func (r *DbcsSystemReconciler) validatePDBExistence(dbcs *databasev4.DbcsSystem) (bool, error) {
	r.Logger.Info("Validating PDB existence for all provided PDBs")

	// Iterate over each PDBConfig in Spec.PdbConfigs
	for _, pdbConfig := range dbcs.Spec.PdbConfigs {
		pdbName := pdbConfig.PdbName
		r.Logger.Info("Checking PDB existence in Status", "PDBName", *pdbName)

		found := false

		// Check if the PDB exists in Status.PdbDetailsStatus with a state of "Available"
		for _, pdbDetailsStatus := range dbcs.Status.PdbDetailsStatus {
			for _, pdbStatus := range pdbDetailsStatus.PDBConfigStatus {
				if
pdbStatus.PdbName != nil && *pdbStatus.PdbName == *pdbName && pdbStatus.PdbLifecycleState == "AVAILABLE" {
					found = true
					break
				}
			}
			if found {
				break
			}
		}

		if !found {
			r.Logger.Info("Pluggable database does not exist or is not available in Status.PdbDetailsStatus", "PDBName", *pdbName)
			return false, nil
		}
	}

	// If all PDBs are found and available
	r.Logger.Info("All specified PDBs are available")
	return true, nil
}

// createPluggableDatabase creates the PDB described by pdbConfig in the given
// container database (unless it already exists), then polls OCI until the PDB
// is AVAILABLE or FAILED. It returns the pluggable database OCID.
func (r *DbcsSystemReconciler) createPluggableDatabase(ctx context.Context, dbcs *databasev4.DbcsSystem, pdbConfig databasev4.PDBConfig, databaseId, compartmentId, dbSystemId string) (string, error) {
	r.Logger.Info("Checking if the pluggable database exists", "PDBName", pdbConfig.PdbName)

	// Check if the pluggable database already exists
	exists, pdbId, err := r.doesPluggableDatabaseExist(ctx, compartmentId, pdbConfig.PdbName, databaseId)
	if err != nil {
		r.Logger.Error(err, "Failed to check if pluggable database exists", "PDBName", pdbConfig.PdbName)
		return "", err
	}
	if exists {
		// Set the PluggableDatabaseId in PDBConfig
		pdbConfig.PluggableDatabaseId = pdbId
		r.Logger.Info("Pluggable database already exists", "PDBName", pdbConfig.PdbName, "PluggableDatabaseId", *pdbConfig.PluggableDatabaseId)
		return *pdbId, nil
	}

	// Define the DatabaseExists method locally
	databaseExists := func(dbSystemID string) (bool, error) {
		req := database.GetDbSystemRequest{
			DbSystemId: &dbSystemID,
		}
		_, err := r.dbClient.GetDbSystem(ctx, req)
		if err != nil {
			// A 404 from OCI means "does not exist", not a hard failure
			if ociErr, ok := err.(common.ServiceError); ok && ociErr.GetHTTPStatusCode() == 404 {
				return false, nil
			}
			return false, err
		}
		return true, nil
	}

	exists, err = databaseExists(dbSystemId)
	if err != nil {
		r.Logger.Error(err, "Failed to check database existence")
		return "", err
	}

	if !exists {
		// FIX: use a constant format string (go vet flags fmt.Errorf with a
		// non-constant format argument).
		err := fmt.Errorf("database does not exist: %s", dbSystemId)
		r.Logger.Error(err, "Database not found")
		return "", err
	}

	// Fetch secrets for TdeWalletPassword and PdbAdminPassword.
	// FIX: check the error BEFORE using/logging the returned value (the
	// original trimmed and logged success first).
	tdeWalletPassword, err := r.getSecret(ctx, dbcs.Namespace, *pdbConfig.TdeWalletPassword)
	if err != nil {
		r.Logger.Error(err, "Failed to get TDE wallet password secret")
		return "", err
	}
	// Trim newline character from the password
	tdeWalletPassword = strings.TrimSpace(tdeWalletPassword)
	r.Logger.Info("TDE wallet password retrieved successfully")

	pdbAdminPassword, err := r.getSecret(ctx, dbcs.Namespace, *pdbConfig.PdbAdminPassword)
	if err != nil {
		r.Logger.Error(err, "Failed to get PDB admin password secret")
		return "", err
	}
	// Trim newline character from the password
	pdbAdminPassword = strings.TrimSpace(pdbAdminPassword)
	r.Logger.Info("PDB admin password retrieved successfully")

	// Change the status to Provisioning
	if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcs, databasev4.Provision, r.nwClient, r.wrClient); statusErr != nil {
		// FIX: log the actual failure (statusErr); the original logged the
		// stale, nil `err` from the previous call.
		r.Logger.Error(statusErr, "Failed to set DBCS LifeCycle State to Provisioning")
		return "", statusErr
	}
	r.Logger.Info("Updated DBCS LifeCycle State to Provisioning")

	// Proceed with creating the pluggable database
	r.Logger.Info("Creating pluggable database", "PDBName", pdbConfig.PdbName)
	createPdbReq := database.CreatePluggableDatabaseRequest{
		CreatePluggableDatabaseDetails: database.CreatePluggableDatabaseDetails{
			PdbName:                       pdbConfig.PdbName,
			ContainerDatabaseId:           &databaseId,
			ShouldPdbAdminAccountBeLocked: pdbConfig.ShouldPdbAdminAccountBeLocked,
			PdbAdminPassword:              common.String(pdbAdminPassword),
			TdeWalletPassword:             common.String(tdeWalletPassword),
			FreeformTags:                  pdbConfig.FreeformTags,
		},
	}
	response, err := r.dbClient.CreatePluggableDatabase(ctx, createPdbReq)
	if err != nil {
		r.Logger.Error(err, "Failed to create pluggable database", "PDBName", pdbConfig.PdbName)
		return "", err
	}
	// Set the PluggableDatabaseId in PDBConfig
	pdbConfig.PluggableDatabaseId = response.PluggableDatabase.Id

	r.Logger.Info("Pluggable database creation initiated", "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId)

	// Polling mechanism to check PDB status
	const maxRetries = 120   // total 1 hour wait for creation of PDB
	const retryInterval = 30 // in seconds

	for i := 0; i < maxRetries; i++ {
		getPdbReq := database.GetPluggableDatabaseRequest{
			PluggableDatabaseId: pdbConfig.PluggableDatabaseId,
		}

		getPdbResp, err := r.dbClient.GetPluggableDatabase(ctx, getPdbReq)
		if err != nil {
			r.Logger.Error(err, "Failed to get pluggable database status", "PDBID", *pdbConfig.PluggableDatabaseId)
			return "", err
		}

		pdbStatus := getPdbResp.PluggableDatabase.LifecycleState
		r.Logger.Info("Checking pluggable database status", "PDBID", *pdbConfig.PluggableDatabaseId, "Status", pdbStatus)

		if pdbStatus == database.PluggableDatabaseLifecycleStateAvailable {
			r.Logger.Info("Pluggable database successfully created", "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId)
			// Change the status to Available
			if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcs, databasev4.Available, r.nwClient, r.wrClient); statusErr != nil {
				return "", statusErr
			}
			return *response.PluggableDatabase.Id, nil
		}

		if pdbStatus == database.PluggableDatabaseLifecycleStateFailed {
			err := fmt.Errorf("pluggable database creation failed")
			// FIX: logr.Error is (err, msg, key/values...); the original put
			// "PDBName" in the message slot, leaving an odd key/value list.
			r.Logger.Error(err, "Pluggable database creation failed", "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId)
			// Change the status to Failed
			if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcs, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil {
				return "", statusErr
			}
			return "", err
		}

		time.Sleep(retryInterval * time.Second)
	}

	// FIX: same logr message/key-value correction as above.
	err = fmt.Errorf("timed out waiting for pluggable database to become available")
	r.Logger.Error(err, "Timed out waiting for pluggable database", "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId)
	return "", err
}

// pluggableDatabaseExists reports whether a PDB with the given OCID exists in
// OCI; a 404 from the service is treated as "does not exist", not an error.
func (r *DbcsSystemReconciler) pluggableDatabaseExists(ctx context.Context, pluggableDatabaseId string) (bool, error) {
	req := database.GetPluggableDatabaseRequest{
		PluggableDatabaseId: &pluggableDatabaseId,
	}
	_, err := r.dbClient.GetPluggableDatabase(ctx, req)
	if err != nil {
		if ociErr, ok := err.(common.ServiceError); ok && ociErr.GetHTTPStatusCode() == 404 {
			// PDB does not exist
			return false, nil
		}
		// Other error occurred
		return false, err
	}
	// PDB exists
	return true, nil
}

// deletePluggableDatabase deletes the PDB named in pdbConfig, resolving its
// OCID first if it was not supplied. Missing PDBs are treated as a no-op.
func (r *DbcsSystemReconciler) deletePluggableDatabase(ctx context.Context, pdbConfig databasev4.PDBConfig, dbSystemId string) error {
	if pdbConfig.PdbName == nil {
		return fmt.Errorf("PDB name is not specified")
	}

	r.Logger.Info("Deleting pluggable database", "PDBName", *pdbConfig.PdbName)

	if pdbConfig.PluggableDatabaseId == nil {
		r.Logger.Info("PluggableDatabaseId is not specified, getting pluggable databaseID")
		// Call a function to retrieve PluggableDatabaseId
		pdbID, err := r.getPluggableDatabaseID(ctx, pdbConfig, dbSystemId)
		if err != nil {
			return fmt.Errorf("failed to get PluggableDatabaseId: %v", err)
		}
		pdbConfig.PluggableDatabaseId = &pdbID
	}

	// Now pdbConfig.PluggableDatabaseId should not be nil
	if pdbConfig.PluggableDatabaseId == nil {
		return fmt.Errorf("PluggableDatabaseId is still nil after retrieval attempt. Nothing to delete")
	}

	// Check if PluggableDatabaseId exists in the live system
	exists, err := r.pluggableDatabaseExists(ctx, *pdbConfig.PluggableDatabaseId)
	if err != nil {
		r.Logger.Error(err, "Failed to check if pluggable database exists", "PluggableDatabaseId", *pdbConfig.PluggableDatabaseId)
		return err
	}
	if !exists {
		r.Logger.Info("PluggableDatabaseId does not exist in the live system, nothing to delete", "PluggableDatabaseId", *pdbConfig.PluggableDatabaseId)
		return nil
	}

	// Define the delete request
	deleteReq := database.DeletePluggableDatabaseRequest{
		PluggableDatabaseId: pdbConfig.PluggableDatabaseId,
	}

	// Call OCI SDK to delete the PDB
	_, err = r.dbClient.DeletePluggableDatabase(ctx, deleteReq)
	if err != nil {
		r.Logger.Error(err, "Failed to delete pluggable database", "PDBName", *pdbConfig.PdbName)
		return err
	}

	r.Logger.Info("Successfully deleted pluggable database", "PDBName", *pdbConfig.PdbName)
	return nil
}

// getPluggableDatabaseID resolves the OCID of the PDB named in pdbConfig by
// listing all PDBs in the DB system's compartment and matching on name.
func (r *DbcsSystemReconciler) getPluggableDatabaseID(ctx context.Context, pdbConfig databasev4.PDBConfig, dbSystemId string) (string, error) {
	compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId)
	if err != nil {
		fmt.Printf("Failed to get compartment ID: %v\n", err)
		return "", err
	}
	request := database.ListPluggableDatabasesRequest{
		CompartmentId: &compartmentId,
	}

	response, err := r.dbClient.ListPluggableDatabases(ctx, request)
	if err != nil {
		return "", fmt.Errorf("failed to list Pluggable Databases: %v", err)
	}

	var pdbID string

	for _, pdb := range response.Items {
		if *pdb.PdbName == *pdbConfig.PdbName {
			pdbID = *pdb.Id
			break
		}
	}

	if pdbID == "" {
		return "", fmt.Errorf("pluggable database '%s' not found", *pdbConfig.PdbName)
	}
	return pdbID, nil
}

func (r *DbcsSystemReconciler) getPluggableDatabaseDetails(ctx context.Context, dbcsInst *databasev4.DbcsSystem, dbSystemId string, databaseIds []string) error {
compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId) + if err != nil { + fmt.Printf("Failed to get compartment ID: %v\n", err) + return err + } + request := database.ListPluggableDatabasesRequest{ + CompartmentId: &compartmentId, + } + + response, err := r.dbClient.ListPluggableDatabases(ctx, request) + if err != nil { + return fmt.Errorf("failed to list Pluggable Databases: %v", err) + } + + // Create a map to track existing PDBDetailsStatus by PdbName + pdbDetailsMap := make(map[string]databasev4.PDBConfigStatus) + + // Populate the map with existing PDBDetailsStatus from dbcsInst.Status.PdbDetailsStatus + // for _, existingPdbDetails := range dbcsInst.Status.PdbDetailsStatus { + // for _, existingPdbConfig := range existingPdbDetails.PDBConfigStatus { + // pdbDetailsMap[*existingPdbConfig.PdbName] = existingPdbConfig + // } + // } + // Convert databaseIds array to a set for quick lookup + databaseIdsSet := make(map[string]struct{}) + for _, id := range databaseIds { + databaseIdsSet[id] = struct{}{} + } + // Update the map with new PDB details from the response + for _, pdb := range response.Items { + if pdb.ContainerDatabaseId != nil { + // Check if the ContainerDatabaseId is in the set of databaseIds + if _, exists := databaseIdsSet[*pdb.ContainerDatabaseId]; exists { + pdbConfigStatus := databasev4.PDBConfigStatus{ + PdbName: pdb.PdbName, + ShouldPdbAdminAccountBeLocked: pdb.IsRestricted, + FreeformTags: pdb.FreeformTags, + PluggableDatabaseId: pdb.Id, + PdbLifecycleState: convertLifecycleState(pdb.LifecycleState), + } + + // Update the map with the new or updated PDBConfigStatus + pdbDetailsMap[*pdb.PdbName] = pdbConfigStatus + } + } + } + + // Convert the map back to a slice of PDBDetailsStatus + var updatedPdbDetailsStatus []databasev4.PDBDetailsStatus + for _, pdbConfigStatus := range pdbDetailsMap { + updatedPdbDetailsStatus = append(updatedPdbDetailsStatus, databasev4.PDBDetailsStatus{ + PDBConfigStatus: 
[]databasev4.PDBConfigStatus{pdbConfigStatus}, + }) + } + + // Assign the updated slice to dbcsInst.Status.PdbDetailsStatus + dbcsInst.Status.PdbDetailsStatus = updatedPdbDetailsStatus + + return nil +} + +func convertLifecycleState(state database.PluggableDatabaseSummaryLifecycleStateEnum) databasev4.LifecycleState { + switch state { + case database.PluggableDatabaseSummaryLifecycleStateProvisioning: + return databasev4.Provision + case database.PluggableDatabaseSummaryLifecycleStateAvailable: + return databasev4.Available + case database.PluggableDatabaseSummaryLifecycleStateTerminating: + return databasev4.Terminate + case database.PluggableDatabaseSummaryLifecycleStateTerminated: + return databasev4.LifecycleState(databasev4.Terminated) + case database.PluggableDatabaseSummaryLifecycleStateUpdating: + return databasev4.Update + case database.PluggableDatabaseSummaryLifecycleStateFailed: + return databasev4.Failed + default: + return databasev4.Failed + } +} + +// doesPluggableDatabaseExist checks if a pluggable database with the given name exists +func (r *DbcsSystemReconciler) doesPluggableDatabaseExist(ctx context.Context, compartmentId string, pdbName *string, databaseId string) (bool, *string, error) { + if pdbName == nil { + return false, nil, fmt.Errorf("pdbName is nil") + } + + listPdbsReq := database.ListPluggableDatabasesRequest{ + CompartmentId: &compartmentId, + } + + resp, err := r.dbClient.ListPluggableDatabases(ctx, listPdbsReq) + if err != nil { + return false, nil, err + } + + for _, pdb := range resp.Items { + if pdb.ContainerDatabaseId != nil { + if pdb.PdbName != nil && *pdb.PdbName == *pdbName && pdb.LifecycleState != "TERMINATED" && *pdb.ContainerDatabaseId == databaseId { + return true, pdb.Id, nil + } + } + } + + return false, nil, nil +} + +// Function to create KMS vault +func (r *DbcsSystemReconciler) createKMSVault(ctx context.Context, kmsConfig *databasev4.KMSConfig, kmsClient keymanagement.KmsManagementClient, kmsInst 
*databasev4.KMSDetailsStatus) (*keymanagement.CreateVaultResponse, error) { + // Dereference the ConfigurationProvider pointer + configProvider := *kmsClient.ConfigurationProvider() + + kmsVaultClient, err := keymanagement.NewKmsVaultClientWithConfigurationProvider(configProvider) + if err != nil { + r.Logger.Error(err, "Error creating KMS vault client") + return nil, err + } + var vaultType keymanagement.CreateVaultDetailsVaultTypeEnum + + if kmsConfig.VaultType != "" { + switch kmsConfig.VaultType { + case "VIRTUAL_PRIVATE": + vaultType = keymanagement.CreateVaultDetailsVaultTypeVirtualPrivate + case "EXTERNAL": + vaultType = keymanagement.CreateVaultDetailsVaultTypeExternal + case "DEFAULT": + vaultType = keymanagement.CreateVaultDetailsVaultTypeDefault + default: + err := fmt.Errorf("unsupported VaultType specified: %s", kmsConfig.VaultType) + r.Logger.Error(err, "unsupported VaultType specified") + return nil, err + } + } else { + // Default to DEFAULT if kmsConfig.VaultType is not defined + vaultType = keymanagement.CreateVaultDetailsVaultTypeDefault + } + + createVaultReq := keymanagement.CreateVaultRequest{ + CreateVaultDetails: keymanagement.CreateVaultDetails{ + CompartmentId: common.String(kmsConfig.CompartmentId), + DisplayName: common.String(kmsConfig.VaultName), + VaultType: vaultType, + }, + } + + resp, err := kmsVaultClient.CreateVault(ctx, createVaultReq) + if err != nil { + r.Logger.Error(err, "Error creating KMS vault") + return nil, err + } + // Wait until vault becomes active or timeout + timeout := time.After(5 * time.Minute) // Example timeout: 5 minutes + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + + for { + select { + case <-timeout: + r.Logger.Error(err, "timed out waiting for vault to become active") + case <-ticker.C: + getVaultReq := keymanagement.GetVaultRequest{ + VaultId: resp.Id, + } + + getResp, err := kmsVaultClient.GetVault(ctx, getVaultReq) + if err != nil { + r.Logger.Error(err, "Error getting vault 
status") + return nil, err + } + + if getResp.LifecycleState == keymanagement.VaultLifecycleStateActive { + r.Logger.Info("KMS vault created successfully and active") + // Save the vault details into KMSConfig + kmsInst.VaultId = *getResp.Vault.Id + kmsInst.ManagementEndpoint = *getResp.Vault.ManagementEndpoint + kmsInst.VaultName = *getResp.DisplayName + kmsInst.CompartmentId = *getResp.CompartmentId + kmsInst.VaultType = kmsConfig.VaultType + return &keymanagement.CreateVaultResponse{}, err + } + + r.Logger.Info(fmt.Sprintf("Vault state: %s, waiting for active state...", string(getResp.LifecycleState))) + } + } +} + +// Function to create KMS key +func (r *DbcsSystemReconciler) createKMSKey(ctx context.Context, kmsConfig *databasev4.KMSConfig, kmsClient keymanagement.KmsManagementClient, kmsInst *databasev4.KMSDetailsStatus) (*keymanagement.CreateKeyResponse, error) { + // Determine the KeyShape based on the encryption algorithm + var algorithm keymanagement.KeyShapeAlgorithmEnum + var keyLength int + switch kmsConfig.EncryptionAlgo { + case "AES": + algorithm = keymanagement.KeyShapeAlgorithmAes + keyLength = 32 + case "RSA": + algorithm = keymanagement.KeyShapeAlgorithmRsa + keyLength = 512 + default: + // Default to AES if the provided algorithm is unsupported + algorithm = keymanagement.KeyShapeAlgorithmAes + keyLength = 32 + r.Logger.Info("Unsupported encryption algorithm. 
Defaulting to AES.") + } + + // Create the key shape with the algorithm + keyShape := keymanagement.KeyShape{ + Algorithm: algorithm, + Length: common.Int(keyLength), + } + + createKeyReq := keymanagement.CreateKeyRequest{ + CreateKeyDetails: keymanagement.CreateKeyDetails{ + CompartmentId: common.String(kmsConfig.CompartmentId), + DisplayName: common.String(kmsConfig.KeyName), + KeyShape: &keyShape, + }, + RequestMetadata: common.RequestMetadata{}, + } + + // Call CreateKey without vaultID + resp, err := kmsClient.CreateKey(ctx, createKeyReq) + if err != nil { + r.Logger.Error(err, "Error creating KMS key:") + return nil, err + } + + r.Logger.Info("KMS key created successfully:", resp) + kmsInst.KeyId = *resp.Key.Id + kmsInst.EncryptionAlgo = string(algorithm) + return &resp, nil +} + +func (r *DbcsSystemReconciler) getSecret(ctx context.Context, namespace, secretName string) (string, error) { + secret := &corev1.Secret{} + err := r.KubeClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: secretName}, secret) + if err != nil { + return "", err + } + + // Assume the secret contains only one key-value pair + for _, value := range secret.Data { + return string(value), nil + } + + return "", fmt.Errorf("secret %s is empty", secretName) +} + +// func (r *DbcsSystemReconciler) cloneDbSystem(ctx context.Context, dbcsInst *databasev4.DbcsSystem, provider common.ConfigurationProvider) error { + +// // Initialize OCI clients +// dbClient, err := database.NewDatabaseClientWithConfigurationProvider(provider) +// if err != nil { +// return fmt.Errorf("failed to create OCI database client: %v", err) +// } + +// // Get DB System details +// compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, *dbcsInst.Status.Id) +// if err != nil { +// fmt.Printf("Failed to get compartment ID: %v\n", err) +// return err +// } + +// dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, *dbcsInst.Status.Id) +// if err != nil { +// fmt.Printf("Failed to get DB Home ID: 
%v\n", err) +// return err +// } + +// databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, *dbcsInst.Status.Id, compartmentId, dbHomeId) +// if err != nil { +// fmt.Printf("Failed to get database IDs: %v\n", err) +// return err +// } + +// // Use the first database ID for cloning +// if len(databaseIds) == 0 { +// return fmt.Errorf("no databases found in the DB system") +// } + +// // Retrieve details of the database to clone +// sourceDatabaseId := databaseIds[0] +// _, err = dbClient.GetDatabase(ctx, database.GetDatabaseRequest{ +// DatabaseId: common.String(sourceDatabaseId), +// }) +// if err != nil { +// return fmt.Errorf("failed to get source database details: %v", err) +// } + +// // adminPassword, err := dbcsv1.GetAdminPassword(kubeClient, dbcsInstance) +// // if err != nil { +// // log.Fatalf("Error getting admin password: %v", err) +// // } + +// // tdePassword, err := GetTdePassword(kubeClient, dbcsInstance) +// // if err != nil { +// // log.Fatalf("Error getting TDE password: %v", err) +// // } + +// // Define the details for creating the database from the existing DB system +// // createDatabaseDetails := CreateDatabaseBaseWrapper{ +// // CreateDatabaseFromDbSystemDetails: database.CreateDatabaseFromDbSystemDetails{ +// // AdminPassword: common.String(adminPassword), // Replace with actual admin password +// // DbName: common.String(dbcsInst.Spec.DbSystem.DbName), // Use the dbName from DbcsSystemSpec +// // DbDomain: common.String(dbcsInst.Spec.DbSystem.DbDomain), // Use the dbDomain from DbcsSystemSpec +// // DbUniqueName: common.String(dbcsInst.Spec.DbSystem.DbUniqueName), // Use the dbUniqueName from DbcsSystemSpec +// // DbBackupConfig: &database.DbBackupConfig{ +// // AutoBackupEnabled: dbcsInst.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled, +// // RecoveryWindowInDays: dbcsInst.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays, +// // }, +// // FreeformTags: dbcsInst.Spec.DbSystem.Tags, +// // DefinedTags: map[string]map[string]interface{}{ 
+// // "Namespace": { +// // "TagKey": "TagValue", // Replace with actual defined tags if needed +// // }, +// // }, +// // }, +// // } +// // createDatabaseRequest := database.CreateDatabaseRequest{ +// // CreateNewDatabaseDetails: &createDatabaseDetails, +// // } + +// // createDatabaseResponse, err := dbClient.CreateDatabase(ctx, createDatabaseRequest) +// // if err != nil { +// // return fmt.Errorf("failed to create database from DB system: %v", err) +// // } + +// // // Update instance status with the new database ID +// // dbcsInst.Status.DbInfo = append(dbcsInst.Status.DbInfo, databasev4.DbStatus{ +// // Id: createDatabaseResponse.Database.Id, +// // DbName: dbcsInst.Spec.DbSystem.DbName, +// // DbUniqueName: dbcsInst.Spec.DbSystem.DbUniqueName, +// // }) + +// // err = r.KubeClient.Status().Update(ctx, dbcsInst) +// // if err != nil { +// // return fmt.Errorf("failed to update instance status with database ID: %v", err) +// // } + +// return nil +// } + +// Convert DbBackupConfigAutoBackupWindowEnum to *string +func autoBackupWindowEnumToStringPtr(enum *database.DbBackupConfigAutoBackupWindowEnum) *string { + if enum == nil { + return nil + } + value := string(*enum) + return &value +} +func (r *DbcsSystemReconciler) stringToDbBackupConfigAutoBackupWindowEnum(value *string) (database.DbBackupConfigAutoBackupWindowEnum, error) { + // Define a default value + // Define a default value + const defaultAutoBackupWindow = database.DbBackupConfigAutoBackupWindowOne + + if value == nil { + return defaultAutoBackupWindow, nil // Return the default value + } + + // Convert to enum + enum, ok := database.GetMappingDbBackupConfigAutoBackupWindowEnum(*value) + if !ok { + return "", fmt.Errorf("invalid value for AutoBackupWindow: %s", *value) + } + return enum, nil +} + +func assignDBCSID(dbcsInst *databasev4.DbcsSystem, dbcsID string) { + dbcsInst.Spec.Id = &dbcsID +} + +func (r *DbcsSystemReconciler) eventFilterPredicate() predicate.Predicate { + return 
predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // Get the dbName as old dbName when an update event happens + oldObject := e.ObjectOld.DeepCopyObject().(*databasev4.DbcsSystem) + newObject := e.ObjectNew.DeepCopyObject().(*databasev4.DbcsSystem) + specObject := !reflect.DeepEqual(oldObject.Spec, newObject.Spec) + + deletionTimeStamp := !reflect.DeepEqual(oldObject.GetDeletionTimestamp(), newObject.GetDeletionTimestamp()) + + if specObject || deletionTimeStamp { + return true + } + + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } +} + +// SetupWithManager sets up the controller with the Manager. +func (r *DbcsSystemReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&databasev4.DbcsSystem{}). + WithEventFilter(r.eventFilterPredicate()). + WithOptions(controller.Options{MaxConcurrentReconciles: 50}). + Complete(r) +} diff --git a/controllers/database/lrest_controller.go b/controllers/database/lrest_controller.go new file mode 100644 index 00000000..91c883e1 --- /dev/null +++ b/controllers/database/lrest_controller.go @@ -0,0 +1,1105 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + //"fmt" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + //lrcommons "github.com/oracle/oracle-database-operator/commons/multitenant/lrest" +) + +// LRESTReconciler reconciles a LREST object +type LRESTReconciler struct { + client.Client + Scheme *runtime.Scheme + Config *rest.Config + Log logr.Logger + Interval time.Duration + Recorder record.EventRecorder +} + +var ( + lrestPhaseInit = "Initializing" + lrestPhasePod = "CreatingPod" + lrestPhaseValPod = "ValidatingPods" + lrestPhaseService = "CreatingService" + lrestPhaseSecrets = "DeletingSecrets" + lrestPhaseReady = "Ready" + lrestPhaseDelete = "Deleting" + lrestPhaseFail = "Failed" +) + +const LRESTFinalizer = "database.oracle.com/LRESTfinalizer" + +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrests,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrests/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrests/finalizers,verbs=update 
+//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;services;configmaps;events;replicasets,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups=core,resources=pods;secrets;services;configmaps;namespaces,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the LREST object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile +func (r *LRESTReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("multitenantoperator", req.NamespacedName) + log.Info("Reconcile requested") + + reconcilePeriod := r.Interval * time.Second + requeueY := ctrl.Result{Requeue: true, RequeueAfter: reconcilePeriod} + requeueN := ctrl.Result{} + + var err error + lrest := &dbapi.LREST{} + + // Execute for every reconcile + defer func() { + log.Info("DEFER", "Name", lrest.Name, "Phase", lrest.Status.Phase, "Status", strconv.FormatBool(lrest.Status.Status)) + if !lrest.Status.Status { + if err := r.Status().Update(ctx, lrest); err != nil { + log.Error(err, "Failed to update status for :"+lrest.Name, "err", err.Error()) + } + } + }() + + err = r.Client.Get(context.TODO(), req.NamespacedName, lrest) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("LREST Resource Not found", "Name", lrest.Name) + // Request object not found, could have been deleted after reconcile req. + // Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers. + // Return and don't requeue + lrest.Status.Status = true + return requeueN, nil + } + // Error reading the object - requeue the req. + return requeueY, err + } + + log.Info("Res Status:", "Name", lrest.Name, "Phase", lrest.Status.Phase, "Status", strconv.FormatBool(lrest.Status.Status)) + + // Finalizer section + err = r.manageLRESTDeletion(ctx, req, lrest) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + + // If post-creation, LREST spec is changed, check and take appropriate action + if (lrest.Status.Phase == lrestPhaseReady) && lrest.Status.Status { + r.evaluateSpecChange(ctx, req, lrest) + } + + if !lrest.Status.Status { + phase := lrest.Status.Phase + log.Info("Current Phase:"+phase, "Name", lrest.Name) + + switch phase { + case lrestPhaseInit: + err = r.verifySecrets(ctx, req, lrest) + if err != nil { + lrest.Status.Phase = lrestPhaseFail + return requeueN, nil + } + lrest.Status.Phase = lrestPhasePod + case lrestPhasePod: + // Create LREST PODs + err = r.createLRESTInstances(ctx, req, lrest) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + lrest.Status.Phase = lrestPhaseValPod + case lrestPhaseValPod: + // Validate LREST PODs + err = r.validateLRESTPods(ctx, req, lrest) + if err != nil { + if lrest.Status.Phase == lrestPhaseFail { + return requeueN, nil + } + log.Info("Reconcile queued") + return requeueY, nil + } + lrest.Status.Phase = lrestPhaseService + case lrestPhaseService: + // Create LREST Service + err = r.createLRESTSVC(ctx, req, lrest) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + //lrest.Status.Phase = lrestPhaseSecrets + lrest.Status.Phase = lrestPhaseReady + case lrestPhaseSecrets: + // Delete LREST Secrets + //r.deleteSecrets(ctx, req, lrest) + lrest.Status.Phase = lrestPhaseReady + lrest.Status.Msg = "Success" + case lrestPhaseReady: + lrest.Status.Status = true + r.Status().Update(ctx, lrest) + return 
requeueN, nil + default: + lrest.Status.Phase = lrestPhaseInit + log.Info("DEFAULT:", "Name", lrest.Name, "Phase", phase, "Status", strconv.FormatBool(lrest.Status.Status)) + } + + if err := r.Status().Update(ctx, lrest); err != nil { + log.Error(err, "Failed to update status for :"+lrest.Name, "err", err.Error()) + } + return requeueY, nil + } + + log.Info("Reconcile completed") + return requeueN, nil +} + +/* +********************************************************* + - Create a ReplicaSet for pods based on the LREST container + /******************************************************* +*/ +func (r *LRESTReconciler) createLRESTInstances(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("createLRESTInstances", req.NamespacedName) + + replicaSet := r.createReplicaSetSpec(lrest) + + foundRS := &appsv1.ReplicaSet{} + err := r.Get(context.TODO(), types.NamespacedName{Name: replicaSet.Name, Namespace: lrest.Namespace}, foundRS) + if err != nil && apierrors.IsNotFound(err) { + log.Info("Creating LREST Replicaset: " + replicaSet.Name) + err = r.Create(ctx, replicaSet) + if err != nil { + log.Error(err, "Failed to create ReplicaSet for :"+lrest.Name, "Namespace", replicaSet.Namespace, "Name", replicaSet.Name) + return err + } + } else if err != nil { + log.Error(err, "Replicaset : "+replicaSet.Name+" already exists.") + return err + } + + // Set LREST instance as the owner and controller + ctrl.SetControllerReference(lrest, replicaSet, r.Scheme) + + log.Info("Created LREST ReplicaSet successfully") + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "CreatedLRESTReplicaSet", "Created LREST Replicaset (Replicas - %s) for %s", strconv.Itoa(lrest.Spec.Replicas), lrest.Name) + return nil +} + +/* +************************************************ + - Validate LREST Pod. 
Check if there are any errors + /*********************************************** +*/ +func (r *LRESTReconciler) validateLRESTPods(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("validateLRESTPod", req.NamespacedName) + + log.Info("Validating Pod creation for :" + lrest.Name) + + podName := lrest.Name + "-lrest" + podList := &corev1.PodList{} + listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingLabels{"name": podName}} + + // List retrieves list of objects for a given namespace and list options. + err := r.List(ctx, podList, listOpts...) + if err != nil { + log.Info("Failed to list pods of: "+podName, "Namespace", req.Namespace) + return err + } + + if len(podList.Items) == 0 { + log.Info("No pods found for: "+podName, "Namespace", req.Namespace) + lrest.Status.Msg = "Waiting for LREST Pod(s) to start" + return errors.New("Waiting for LREST pods to start") + } + + getLRESTStatus := " curl --cert /opt/oracle/lrest/certificates/tls.crt --cacert /opt/oracle/lrest/certificates/ca.crt --key /opt/oracle/lrest/certificates/tls.key -u `cat /opt/oracle/lrest/certificates/webserver_user`:`cat /opt/oracle/lrest/certificates/webserver_pwd` -sSkv -k -X GET https://localhost:" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/" + readyPods := 0 + for _, pod := range podList.Items { + if pod.Status.Phase == corev1.PodRunning { + // Get LREST Status + out, err := dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "", ctx, req, false, "bash", "-c", getLRESTStatus) + if strings.Contains(out, "HTTP/1.1 200 OK") || strings.Contains(strings.ToUpper(err.Error()), "HTTP/1.1 200 OK") || + strings.Contains(out, "HTTP/2") || strings.Contains(strings.ToUpper(err.Error()), " HTTP/2") { + readyPods++ + } else if strings.Contains(out, "HTTP/1.1 404 Not Found") || strings.Contains(strings.ToUpper(err.Error()), "HTTP/1.1 404 NOT FOUND") || strings.Contains(strings.ToUpper(err.Error()), "HTTP/2 404") || 
strings.Contains(strings.ToUpper(err.Error()), "Failed to connect to localhost") { + // Check if DB connection parameters are correct + getLRESTInstallStatus := " grep -q 'Failed to' /tmp/lrest_install.log; echo $?;" + out, _ := dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "", ctx, req, false, "bash", "-c", getLRESTInstallStatus) + if strings.TrimSpace(out) == "0" { + lrest.Status.Msg = "Check DB connection parameters" + lrest.Status.Phase = lrestPhaseFail + // Delete existing ReplicaSet + r.deleteReplicaSet(ctx, req, lrest) + return errors.New("Check DB connection parameters") + } + } + } + } + + if readyPods != lrest.Spec.Replicas { + log.Info("Replicas: "+strconv.Itoa(lrest.Spec.Replicas), "Ready Pods: ", readyPods) + lrest.Status.Msg = "Waiting for LREST Pod(s) to be ready" + return errors.New("Waiting for LREST pods to be ready") + } + + lrest.Status.Msg = "" + return nil +} + +/* +*********************** + - Create Pod spec + +/*********************** +*/ +func (r *LRESTReconciler) createPodSpec(lrest *dbapi.LREST) corev1.PodSpec { + + podSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "secrets", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + DefaultMode: func() *int32 { i := int32(0666); return &i }(), + Sources: []corev1.VolumeProjection{ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTPubKey.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTPubKey.Secret.Key, + Path: lrest.Spec.LRESTPubKey.Secret.Key, + }, + }, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTPriKey.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTPriKey.Secret.Key, + Path: lrest.Spec.LRESTPriKey.Secret.Key, + }, + }, + }, + }, + + /***/ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: 
corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTTlsKey.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTTlsKey.Secret.Key, + Path: lrest.Spec.LRESTTlsKey.Secret.Key, + }, + }, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTTlsCrt.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTTlsCrt.Secret.Key, + Path: lrest.Spec.LRESTTlsCrt.Secret.Key, + }, + }, + }, + }, + }, + }, + }, + }}, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + FSGroup: &[]int64{54321}[0], + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + /*InitContainers: []corev1.Container{{ + Image: lrest.Spec.LRESTImage, + Name: lrest.Name + "-init", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefineLrest(), + Command: []string{"echo test > /opt/oracle/lrest/certificates/tests"}, + Env: func() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "ORACLE_HOST", + Value: lrest.Spec.DBTnsurl, + }} + }(), + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/opt/oracle/lrest/certificates", + Name: "secrets", + ReadOnly: false, + }}, + }},*/ + Containers: []corev1.Container{{ + Image: lrest.Spec.LRESTImage, + Name: lrest.Name + "-lrest", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefineLrest(), + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/opt/oracle/lrest/certificates", + Name: "secrets", + ReadOnly: true, + }, + }, + Env: func() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "ORACLE_HOST", + Value: lrest.Spec.DBServer, + }, + { + Name: "DBTNSURL", + Value: lrest.Spec.DBTnsurl, + }, + { + Name: "TLSCRT", + Value: lrest.Spec.LRESTTlsCrt.Secret.Key, + }, + { + Name: "TLSKEY", + Value: lrest.Spec.LRESTTlsKey.Secret.Key, + }, + { + Name: "PUBKEY", + Value: lrest.Spec.LRESTPubKey.Secret.Key, + }, + 
{ + Name: "PRVKEY", + Value: lrest.Spec.LRESTPriKey.Secret.Key, + }, + { + Name: "ORACLE_PORT", + Value: strconv.Itoa(lrest.Spec.DBPort), + }, + { + Name: "LREST_PORT", + Value: strconv.Itoa(lrest.Spec.LRESTPort), + }, + { + Name: "ORACLE_SERVICE", + Value: lrest.Spec.ServiceName, + }, + { + Name: "R1", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTAdminUser.Secret.SecretName, + }, + Key: lrest.Spec.LRESTAdminUser.Secret.Key, + }, + }, + }, + { + Name: "R2", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTAdminPwd.Secret.SecretName, + }, + Key: lrest.Spec.LRESTAdminPwd.Secret.Key, + }, + }, + }, + { + Name: "R3", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.WebLrestServerUser.Secret.SecretName, + }, + Key: lrest.Spec.WebLrestServerUser.Secret.Key, + }, + }, + }, + { + Name: "R4", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.WebLrestServerPwd.Secret.SecretName, + }, + Key: lrest.Spec.WebLrestServerPwd.Secret.Key, + }, + }, + }, + } + }(), + }}, + + NodeSelector: func() map[string]string { + ns := make(map[string]string) + if len(lrest.Spec.NodeSelector) != 0 { + for key, value := range lrest.Spec.NodeSelector { + ns[key] = value + } + } + return ns + }(), + } + + if len(lrest.Spec.LRESTImagePullSecret) > 0 { + podSpec.ImagePullSecrets = []corev1.LocalObjectReference{ + { + Name: lrest.Spec.LRESTImagePullSecret, + }, + } + } + + podSpec.Containers[0].ImagePullPolicy = corev1.PullAlways + + if len(lrest.Spec.LRESTImagePullPolicy) > 0 { + if strings.ToUpper(lrest.Spec.LRESTImagePullPolicy) == "NEVER" { + podSpec.Containers[0].ImagePullPolicy = 
corev1.PullNever + } + } + + return podSpec +} + +/* +*********************** + - Create ReplicaSet spec + +/*********************** +*/ +func (r *LRESTReconciler) createReplicaSetSpec(lrest *dbapi.LREST) *appsv1.ReplicaSet { + + replicas := int32(lrest.Spec.Replicas) + podSpec := r.createPodSpec(lrest) + + replicaSet := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: lrest.Name + "-lrest-rs", + Namespace: lrest.Namespace, + Labels: map[string]string{ + "name": lrest.Name + "-lrest-rs", + }, + }, + Spec: appsv1.ReplicaSetSpec{ + Replicas: &replicas, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: lrest.Name + "-lrest", + Namespace: lrest.Namespace, + Labels: map[string]string{ + "name": lrest.Name + "-lrest", + }, + }, + Spec: podSpec, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": lrest.Name + "-lrest", + }, + }, + }, + } + + return replicaSet +} + +/* +********************************************************* + - Evaluate change in Spec post creation and instantiation + /******************************************************* +*/ +func (r *LRESTReconciler) deleteReplicaSet(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("deleteReplicaSet", req.NamespacedName) + + k_client, err := kubernetes.NewForConfig(r.Config) + if err != nil { + log.Error(err, "Kubernetes Config Error") + return err + } + + replicaSetName := lrest.Name + "-lrest-rs" + err = k_client.AppsV1().ReplicaSets(lrest.Namespace).Delete(context.TODO(), replicaSetName, metav1.DeleteOptions{}) + if err != nil { + log.Info("Could not delete ReplicaSet", "RS Name", replicaSetName, "err", err.Error()) + if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") { + return err + } + } else { + log.Info("Successfully deleted LREST ReplicaSet", "RS Name", replicaSetName) + } + + return nil +} + +/* +********************************************************* + - Evaluate change in Spec 
post creation and instantiation + /******************************************************* +*/ +func (r *LRESTReconciler) evaluateSpecChange(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("evaluateSpecChange", req.NamespacedName) + + // List the Pods matching the PodTemplate Labels + podName := lrest.Name + "-lrest" + podList := &corev1.PodList{} + listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingLabels{"name": podName}} + + // List retrieves list of objects for a given namespace and list options. + err := r.List(ctx, podList, listOpts...) + if err != nil { + log.Info("Failed to list pods of: "+podName, "Namespace", req.Namespace) + return err + } + + var foundPod corev1.Pod + for _, pod := range podList.Items { + foundPod = pod + break + } + + lrestSpecChange := false + for _, envVar := range foundPod.Spec.Containers[0].Env { + if envVar.Name == "ORACLE_HOST" && envVar.Value != lrest.Spec.DBServer { + lrestSpecChange = true + } else if envVar.Name == "ORACLE_PORT" && envVar.Value != strconv.Itoa(lrest.Spec.DBPort) { + lrestSpecChange = true + } else if envVar.Name == "LREST_PORT" && envVar.Value != strconv.Itoa(lrest.Spec.LRESTPort) { + lrestSpecChange = true + } else if envVar.Name == "ORACLE_SERVICE" && envVar.Value != lrest.Spec.ServiceName { + lrestSpecChange = true + } + } + + if lrestSpecChange { + // Delete existing ReplicaSet + err = r.deleteReplicaSet(ctx, req, lrest) + if err != nil { + return err + } + + lrest.Status.Phase = lrestPhaseInit + lrest.Status.Status = false + r.Status().Update(ctx, lrest) + } else { + // Update the RS if the value of "replicas" is changed + replicaSetName := lrest.Name + "-lrest-rs" + + foundRS := &appsv1.ReplicaSet{} + err := r.Get(context.TODO(), types.NamespacedName{Name: replicaSetName, Namespace: lrest.Namespace}, foundRS) + if err != nil { + log.Error(err, "Unable to get LREST Replicaset: "+replicaSetName) + return err + } + + // Check if 
number of replicas have changed + replicas := int32(lrest.Spec.Replicas) + if lrest.Spec.Replicas != int(*(foundRS.Spec.Replicas)) { + log.Info("Existing Replicas: " + strconv.Itoa(int(*(foundRS.Spec.Replicas))) + ", New Replicas: " + strconv.Itoa(lrest.Spec.Replicas)) + foundRS.Spec.Replicas = &replicas + err = r.Update(ctx, foundRS) + if err != nil { + log.Error(err, "Failed to update ReplicaSet for :"+lrest.Name, "Namespace", lrest.Namespace, "Name", replicaSetName) + return err + } + lrest.Status.Phase = lrestPhaseValPod + lrest.Status.Status = false + r.Status().Update(ctx, lrest) + } + } + + return nil +} + +/* +************************************************ + - Create a Cluster Service for LREST LREST Pod + /*********************************************** +*/ +func (r *LRESTReconciler) createLRESTSVC(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("createLRESTSVC", req.NamespacedName) + + foundSvc := &corev1.Service{} + err := r.Get(context.TODO(), types.NamespacedName{Name: lrest.Name + "-lrest", Namespace: lrest.Namespace}, foundSvc) + if err != nil && apierrors.IsNotFound(err) { + svc := r.createSvcSpec(lrest) + + log.Info("Creating a new Cluster Service for: "+lrest.Name, "Svc.Namespace", svc.Namespace, "Service.Name", svc.Name) + err := r.Create(ctx, svc) + if err != nil { + log.Error(err, "Failed to create new Cluster Service for: "+lrest.Name, "Svc.Namespace", svc.Namespace, "Service.Name", svc.Name) + return err + } + + log.Info("Created LREST Cluster Service successfully") + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "CreatedLRESTService", "Created LREST Service for %s", lrest.Name) + } else { + log.Info("LREST Cluster Service already exists") + } + + return nil +} + +/* +*********************** + - Create Service spec + /*********************** +*/ +func (r *LRESTReconciler) createSvcSpec(lrest *dbapi.LREST) *corev1.Service { + + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: 
"Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: lrest.Name + "-lrest", + Namespace: lrest.Namespace, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "name": lrest.Name + "-lrest", + }, + ClusterIP: corev1.ClusterIPNone, + }, + } + // Set LREST instance as the owner and controller + ctrl.SetControllerReference(lrest, svc, r.Scheme) + return svc +} + +/* +************************************************ + - Check LREST deletion + /*********************************************** +*/ + +func (r *LRESTReconciler) manageLRESTDeletion(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("manageLRESTDeletion", req.NamespacedName) + + /* REGISTER FINALIZER */ + if lrest.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(lrest, LRESTFinalizer) { + controllerutil.AddFinalizer(lrest, LRESTFinalizer) + if err := r.Update(ctx, lrest); err != nil { + return err + } + } + + } else { + log.Info("lrest set to be deleted") + lrest.Status.Phase = lrestPhaseDelete + lrest.Status.Status = true + r.Status().Update(ctx, lrest) + + if controllerutil.ContainsFinalizer(lrest, LRESTFinalizer) { + + if err := r.DeletePDBS(ctx, req, lrest); err != nil { + log.Info("Cannot delete lrpdbs") + return err + } + + controllerutil.RemoveFinalizer(lrest, LRESTFinalizer) + if err := r.Update(ctx, lrest); err != nil { + return err + } + } + + err := r.deleteLRESTInstance(ctx, req, lrest) + if err != nil { + log.Info("Could not delete LREST Resource", "LREST Name", lrest.Spec.LRESTName, "err", err.Error()) + return err + } + + } + return nil +} + +/* +************************************************ + - Delete LREST Resource + +/*********************************************** +*/ +func (r *LRESTReconciler) deleteLRESTInstance(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("deleteLRESTInstance", req.NamespacedName) + + k_client, err := 
kubernetes.NewForConfig(r.Config) + if err != nil { + log.Error(err, "Kubernetes Config Error") + } + + replicaSetName := lrest.Name + "-lrest-rs" + + err = k_client.AppsV1().ReplicaSets(lrest.Namespace).Delete(context.TODO(), replicaSetName, metav1.DeleteOptions{}) + if err != nil { + log.Info("Could not delete ReplicaSet", "RS Name", replicaSetName, "err", err.Error()) + if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") { + return err + } + } else { + log.Info("Successfully deleted LREST ReplicaSet", "RS Name", replicaSetName) + } + + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "DeletedLRESTReplicaSet", "Deleted LREST ReplicaSet for %s", lrest.Name) + + svcName := lrest.Name + "-lrest" + + err = k_client.CoreV1().Services(lrest.Namespace).Delete(context.TODO(), svcName, metav1.DeleteOptions{}) + if err != nil { + log.Info("Could not delete Service", "Service Name", svcName, "err", err.Error()) + if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") { + return err + } + } else { + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "DeletedLRESTService", "Deleted LREST Service for %s", lrest.Name) + log.Info("Successfully deleted LREST Service", "Service Name", svcName) + } + + log.Info("Successfully deleted LREST resource", "LREST Name", lrest.Spec.LRESTName) + return nil +} + +/* +************************************************ + - Get Secret Key for a Secret Name + /*********************************************** +*/ +func (r *LRESTReconciler) verifySecrets(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("verifySecrets", req.NamespacedName) + /* + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.SysAdminPwd.Secret.SecretName); err != nil { + return err + }*/ + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.LRESTAdminUser.Secret.SecretName); err != nil { + return err + } + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.LRESTAdminPwd.Secret.SecretName); err != nil { + return 
err + } + /* + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.LRESTPwd.Secret.SecretName); err != nil { + return err + }*/ + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.WebLrestServerUser.Secret.SecretName); err != nil { + return err + } + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.WebLrestServerPwd.Secret.SecretName); err != nil { + return err + } + + lrest.Status.Msg = "" + log.Info("Verified secrets successfully") + return nil +} + +/* +************************************************ + - Get Secret Key for a Secret Name + /*********************************************** +*/ +func (r *LRESTReconciler) checkSecret(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST, secretName string) error { + + log := r.Log.WithValues("checkSecret", req.NamespacedName) + + secret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: lrest.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + secretName) + lrest.Status.Msg = "Secret not found:" + secretName + return err + } + log.Error(err, "Unable to get the secret.") + return err + } + + return nil +} + +/* +************************************************ + - Delete Secrets + /*********************************************** +*/ +func (r *LRESTReconciler) deleteSecrets(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) { + + log := r.Log.WithValues("deleteSecrets", req.NamespacedName) + + log.Info("Deleting LREST secrets") + secret := &corev1.Secret{} + /* + err := r.Get(ctx, types.NamespacedName{Name: lrest.Spec.SysAdminPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.SysAdminPwd.Secret.SecretName) + } + } + */ + + err := r.Get(ctx, types.NamespacedName{Name: lrest.Spec.LRESTAdminUser.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, 
secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.LRESTAdminUser.Secret.SecretName) + } + } + + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.LRESTAdminPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.LRESTAdminPwd.Secret.SecretName) + } + } + /* + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.LRESTPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.LRESTPwd.Secret.SecretName) + } + } + */ + + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.WebLrestServerUser.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.WebLrestServerUser.Secret.SecretName) + } + } + + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.WebLrestServerPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.WebLrestServerPwd.Secret.SecretName) + } + } +} + +/* +************************************************************* + - SetupWithManager sets up the controller with the Manager. + /************************************************************ +*/ +func (r *LRESTReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.LREST{}). + Owns(&appsv1.ReplicaSet{}). 
//Watch for deleted RS owned by this controller + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Ignore updates to CR status in which case metadata.Generation does not change + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + //return !e.DeleteStateUnknown + return false + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). + Complete(r) +} + +func securityContextDefineLrest() *corev1.SecurityContext { + return &corev1.SecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + RunAsUser: &[]int64{54321}[0], + AllowPrivilegeEscalation: &[]bool{false}[0], + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + "ALL", + }, + }, + } +} + +func (r *LRESTReconciler) DeletePDBS(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("DeletePDBS", req.NamespacedName) + + /* =================== DELETE CASCADE ================ */ + if lrest.Spec.DeletePDBCascade == true { + log.Info("DELETE PDB CASCADE OPTION") + lrpdbList := &dbapi.LRPDBList{} + listOpts := []client.ListOption{} + err := r.List(ctx, lrpdbList, listOpts...) + if err != nil { + log.Info("Failed to get the list of pdbs") + } + + if err == nil { + for _, pdbitem := range lrpdbList.Items { + log.Info("pdbitem.Spec.CDBName:" + pdbitem.Spec.CDBName) + log.Info("lrest.Spec.LRESTName:" + lrest.Spec.LRESTName) + if pdbitem.Spec.CDBName == lrest.Spec.LRESTName { + fmt.Printf("DEVPHASE: Call Delete function for %s %s\n", pdbitem.Name, pdbitem.Spec.LRPDBName) + + var objmap map[string]interface{} /* Used for the return payload */ + values := map[string]string{ + "state": "CLOSE", + "modifyOption": "ABORT", + } + + url := "https://" + pdbitem.Spec.CDBResName + "-lrest." 
+ pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/" + pdbitem.Spec.LRPDBName + + log.Info("callAPI(URL):" + url) + log.Info("pdbitem.Status.OpenMode" + pdbitem.Status.OpenMode) + + if pdbitem.Status.OpenMode != "MOUNTED" { + + log.Info("Force pdb closure") + respData, errapi := NewCallLAPI(r, ctx, req, &pdbitem, url, values, "POST") + + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + pdbitem.Status.SqlCode = int(objmap["sqlcode"].(float64)) + log.Info("pdb closure.......:", "sqlcode", pdbitem.Status.SqlCode) + + if errapi != nil { + log.Error(err, "callAPI cannot close pdb "+pdbitem.Spec.LRPDBName, "err", err.Error()) + return err + } + + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "close pdb", "pdbname=%s", pdbitem.Spec.LRPDBName) + } + + /* start dropping pdb */ + log.Info("Drop pluggable database") + values = map[string]string{ + "action": "INCLUDING", + } + respData, errapi := NewCallLAPI(r, ctx, req, &pdbitem, url, values, "DELETE") + + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + pdbitem.Status.SqlCode = int(objmap["sqlcode"].(float64)) + log.Info(".......:", "sqlcode", pdbitem.Status.SqlCode) + + if errapi != nil { + log.Error(err, "callAPI cannot drop pdb "+pdbitem.Spec.LRPDBName, "err", err.Error()) + return err + } + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "drop pdb", "pdbname=%s", pdbitem.Spec.LRPDBName) + + /* remove finalizer */ + + if controllerutil.ContainsFinalizer(&pdbitem, LRPDBFinalizer) { + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(&pdbitem, LRPDBFinalizer) + err = r.Update(ctx, &pdbitem) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + } + + err = r.Delete(context.Background(), &pdbitem, 
client.GracePeriodSeconds(1)) + if err != nil { + log.Info("Could not delete LRPDB resource", "err", err.Error()) + return err + } + + } /* check pdb name */ + } /* end of loop */ + } + + } + /* ================================================ */ + return nil +} diff --git a/controllers/database/lrpdb_controller.go b/controllers/database/lrpdb_controller.go new file mode 100644 index 00000000..1aadf65b --- /dev/null +++ b/controllers/database/lrpdb_controller.go @@ -0,0 +1,2381 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "bytes" + "context" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + + //"encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/oracle/oracle-database-operator/commons/k8s" + lrcommons "github.com/oracle/oracle-database-operator/commons/multitenant/lrest" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// Bitmask functions +const ( + MPAPPL = 0x00000001 /* The map config has been applyed */ + MPSYNC = 0x00000002 /* The map config is in sync with v$parameters where is default=flase */ + MPEMPT = 0x00000004 /* The map is empty - not specify */ + MPWARN = 0x00000008 /* Map applied with warnings */ + MPINIT = 0x00000010 /* Config map init */ + SPARE3 = 
0x00000020 +) + +func bis(bitmask int, bitval int) int { + bitmask = ((bitmask) | (bitval)) + return bitmask +} + +func bit(bitmask int, bitval int) bool { + if bitmask&bitval != 0 { + return true + } else { + return false + } +} + +func bid(bitmask int, bitval int) int { + bitmask ^= ((bitval) & (bitmask)) + return bitmask +} + +func bitmaskprint(bitmask int) string { + BitRead := "|" + if bit(bitmask, MPAPPL) { + BitRead = strings.Join([]string{BitRead, "MPAPPL|"}, "") + } + if bit(bitmask, MPSYNC) { + BitRead = strings.Join([]string{BitRead, "MPSYNC|"}, "") + } + if bit(bitmask, MPEMPT) { + BitRead = strings.Join([]string{BitRead, "MPEMPT|"}, "") + } + if bit(bitmask, MPWARN) { + BitRead = strings.Join([]string{BitRead, "MPWARN|"}, "") + } + if bit(bitmask, MPINIT) { + BitRead = strings.Join([]string{BitRead, "MPINIT|"}, "") + } + if bit(bitmask, SPARE3) { + BitRead = strings.Join([]string{BitRead, "SPARE3|"}, "") + } + + return BitRead +} + +// LRPDBReconciler reconciles a LRPDB object +type LRPDBReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Interval time.Duration + Recorder record.EventRecorder +} + +type restSQLCollection struct { + Env struct { + DefaultTimeZone string `json:"defaultTimeZone,omitempty"` + } `json:"env"` + Items []SQL_Item `json:"items"` +} + +type SQL_Item struct { + StatementId int `json:"statementId,omitempty"` + Response []string `json:"response"` + ErrorCode int `json:"errorCode,omitempty"` + ErrorLine int `json:"errorLine,omitempty"` + ErrorColumn int `json:"errorColumn,omitempty"` + ErrorDetails string `json:"errorDetails,omitempty"` + Result int `json:"result,omitempty"` +} + +type LRESTError struct { + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` + Type string `json:"type,omitempty"` + Instance string `json:"instance,omitempty"` +} + +var ( + lrpdbPhaseCreate = "Creating" + lrpdbPhasePlug = "Plugging" + lrpdbPhaseUnplug = "Unplugging" + lrpdbPhaseClone = "Cloning" 
+ lrpdbPhaseFinish = "Finishing" + lrpdbPhaseReady = "Ready" + lrpdbPhaseDelete = "Deleting" + lrpdbPhaseModify = "Modifying" + lrpdbPhaseMap = "Mapping" + lrpdbPhaseStatus = "CheckingState" + lrpdbPhaseFail = "Failed" + lrpdbPhaseAlterPlug = "AlterPlugDb" + lrpdbPhaseSpare = "NoAction" +) + +const LRPDBFinalizer = "database.oracle.com/LRPDBfinalizer" + +var tde_Password string +var tde_Secret string +var flood_control bool = false +var assertiveLpdbDeletion bool = false /* Global variable for assertive pdb deletion */ +/* + We need to record the config map name after pdb creation + in order to use it during open and clone op if config map + name is not set the open and clone yaml file +*/ +var globalconfigmap string +var globalsqlcode int + +/* mind https://github.com/kubernetes-sigs/kubebuilder/issues/549 */ +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrpdbs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=events,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrpdbs/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrpdbs/finalizers,verbs=get;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the LRPDB object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile +func (r *LRPDBReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("multitenantoperator", req.NamespacedName) + log.Info("Reconcile requested") + + reconcilePeriod := r.Interval * time.Second + requeueY := ctrl.Result{Requeue: true, RequeueAfter: reconcilePeriod} + requeueN := ctrl.Result{} + + var err error + lrpdb := &dbapi.LRPDB{} + + // Execute for every reconcile + defer func() { + //log.Info("DEFER LRPDB", "Name", lrpdb.Name, "Phase", lrpdb.Status.Phase, "Status", strconv.FormatBool(lrpdb.Status.Status)) + if !lrpdb.Status.Status { + if lrpdb.Status.Phase == lrpdbPhaseReady { + lrpdb.Status.Status = true + } + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + } + }() + + err = r.Client.Get(context.TODO(), req.NamespacedName, lrpdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("LRPDB Resource Not found", "Name", lrpdb.Name) + // Request object not found, could have been deleted after reconcile req. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + lrpdb.Status.Status = true + return requeueN, nil + } + // Error reading the object - requeue the req. 
+ return requeueY, err + } + + // Finalizer section + err = r.manageLRPDBDeletion2(ctx, req, lrpdb) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + + // Check for Duplicate LRPDB + if !lrpdb.Status.Status { + err = r.checkDuplicateLRPDB(ctx, req, lrpdb) + if err != nil { + return requeueN, nil + } + } + + action := strings.ToUpper(lrpdb.Spec.Action) + /* + Bug 36714702 - LREST OPERATOR - POST ALTER PDB OPTION LRPDB STATUS INTERMITTENTLY + SHOWS "WAITING FOR LRPDB PARAMETER TO BE MODIFIED" + introducing additional check to avoid alter system repetition during + reconciliation loop + */ + if lrpdb.Status.Phase == lrpdbPhaseReady { + if (lrpdb.Status.Action != "" || action != "NOACTION") && (action == "ALTER" || action == "MODIFY" || action == "STATUS" || lrpdb.Status.Action != action) { + lrpdb.Status.Status = false + } else { + err = r.getLRPDBState(ctx, req, lrpdb) + if err != nil { + lrpdb.Status.Phase = lrpdbPhaseFail + } else { + lrpdb.Status.Phase = lrpdbPhaseReady + lrpdb.Status.Msg = "Success" + } + r.Status().Update(ctx, lrpdb) + } + } + + if !lrpdb.Status.Status { + r.validatePhase(ctx, req, lrpdb) + phase := lrpdb.Status.Phase + log.Info("LRPDB:", "Name", lrpdb.Name, "Phase", phase, "Status", strconv.FormatBool(lrpdb.Status.Status)) + + switch phase { + case lrpdbPhaseCreate: + err = r.createLRPDB(ctx, req, lrpdb) + case lrpdbPhaseClone: + err = r.cloneLRPDB(ctx, req, lrpdb) + case lrpdbPhasePlug: + err = r.plugLRPDB(ctx, req, lrpdb) + case lrpdbPhaseUnplug: + err = r.unplugLRPDB(ctx, req, lrpdb) + case lrpdbPhaseModify: + err = r.modifyLRPDB(ctx, req, lrpdb) + case lrpdbPhaseDelete: + err = r.deleteLRPDB(ctx, req, lrpdb) + case lrpdbPhaseStatus: + err = r.getLRPDBState(ctx, req, lrpdb) + case lrpdbPhaseMap: + err = r.mapLRPDB(ctx, req, lrpdb) + case lrpdbPhaseFail: + err = r.mapLRPDB(ctx, req, lrpdb) + case lrpdbPhaseAlterPlug: + err = r.alterSystemLRPDB(ctx, req, lrpdb) + default: + log.Info("DEFAULT:", "Name", lrpdb.Name, 
"Phase", phase, "Status", strconv.FormatBool(lrpdb.Status.Status)) + return requeueN, nil + } + lrpdb.Status.Action = strings.ToUpper(lrpdb.Spec.Action) + if err != nil { + lrpdb.Status.Phase = lrpdbPhaseFail + lrpdb.Status.SqlCode = globalsqlcode + } else { + lrpdb.Status.Phase = lrpdbPhaseReady + lrpdb.Status.Msg = "Success" + } + } + + r.ManageConfigMapForCloningAndPlugin(ctx, req, lrpdb) + lrpdb.Status.BitStatStr = bitmaskprint(lrpdb.Status.Bitstat) + + log.Info("Reconcile completed") + return requeueY, nil +} + +/* +************************************************ + - Validate the LRPDB Spec + /*********************************************** +*/ +func (r *LRPDBReconciler) validatePhase(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) { + + log := r.Log.WithValues("validatePhase", req.NamespacedName) + + action := strings.ToUpper(lrpdb.Spec.Action) + + log.Info("Validating LRPDB phase for: "+lrpdb.Name, "Action", action) + + switch action { + case "CREATE": + lrpdb.Status.Phase = lrpdbPhaseCreate + case "CLONE": + lrpdb.Status.Phase = lrpdbPhaseClone + case "PLUG": + lrpdb.Status.Phase = lrpdbPhasePlug + case "UNPLUG": + lrpdb.Status.Phase = lrpdbPhaseUnplug + case "MODIFY": + lrpdb.Status.Phase = lrpdbPhaseModify + case "DELETE": + lrpdb.Status.Phase = lrpdbPhaseDelete + case "STATUS": + lrpdb.Status.Phase = lrpdbPhaseStatus + case "MAP": + lrpdb.Status.Phase = lrpdbPhaseMap + case "ALTER": + lrpdb.Status.Phase = lrpdbPhaseAlterPlug + case "NOACTION": + lrpdb.Status.Phase = lrpdbPhaseStatus + + } + + log.Info("Validation complete") +} + +/* + This function scans the list of crd + pdb to verify the existence of the + pdb (crd) that we want to clone. 
+ Bug 36752925 - LREST OPERATOR - CLONE NON-EXISTENT
+ PDB CREATES A LRPDB WITH STATUS FAILED
+
+ return 1 - CRD found
+ return 0 - CRD not found / Stop clone process
+
+ Bug 36753107 - LREST OPERATOR - CLONE
+ CLOSED PDB SUCCESSFULLY CLONES
+
+*/
+
+// checkPDBforCloninig returns the number of LRPDB CRs (0 or 1) in the
+// request namespace whose Spec.LRPDBName matches targetPdbName and whose
+// OpenMode is not "MOUNTED", i.e. whether targetPdbName is a usable clone
+// source. 0 means the clone must be aborted.
+func (r *LRPDBReconciler) checkPDBforCloninig(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, targetPdbName string) (int, error) {
+	// Fix: logger was mislabelled "checkDuplicateLRPDB".
+	log := r.Log.WithValues("checkPDBforCloninig", req.NamespacedName)
+	pdbCounter := 0
+
+	lrpdbList := &dbapi.LRPDBList{}
+	listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingFields{"spec.pdbName": targetPdbName}}
+	err := r.List(ctx, lrpdbList, listOpts...)
+	if err != nil {
+		log.Info("Failed to list lrpdbs", "Namespace", req.Namespace, "Error", err)
+		return 0, err
+	}
+	if len(lrpdbList.Items) == 0 {
+		log.Info("No pdbs available")
+		return pdbCounter, err
+	}
+
+	for _, p := range lrpdbList.Items {
+		// Fix: %i is not a valid Go fmt verb (go vet printf error); use %d.
+		fmt.Printf("DEBUGCLONE %s %s %d\n", p.Spec.LRPDBName, targetPdbName, pdbCounter)
+		if p.Spec.LRPDBName == targetPdbName {
+			log.Info("Found " + targetPdbName + " in the crd list")
+			if p.Status.OpenMode == "MOUNTED" {
+				log.Info("Cannot clone a mounted pdb")
+				return pdbCounter, err
+			}
+			pdbCounter++
+			fmt.Printf("DEBUGCLONE %s %s %d\n", p.Spec.LRPDBName, targetPdbName, pdbCounter)
+			return pdbCounter, err
+		}
+
+	}
+	return pdbCounter, err
+}
+
+/*
+***************************************************************
+ - Check for Duplicate LRPDB. Same LRPDB name on the same LREST resource.
+
+/**************************************************************
+*/
+// checkDuplicateLRPDB reports an error when a *different* LRPDB resource
+// bound to the same LREST (CDB) resource already manages a PDB with the
+// same name; it also marks this CR's Status as failed in that case.
+func (r *LRPDBReconciler) checkDuplicateLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error {
+
+	log := r.Log.WithValues("checkDuplicateLRPDB", req.NamespacedName)
+
+	// Name of the LREST CR that holds the LREST container
+	lrestResName := lrpdb.Spec.CDBResName
+	//lrestame := lrpdb.Spec.LRESTName
+
+	// Name of the LRPDB resource
+	lrpdbResName := lrpdb.Spec.LRPDBName
+
+	lrpdbList := &dbapi.LRPDBList{}
+
+	listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingFields{"spec.pdbName": lrpdbResName}}
+
+	// List retrieves list of objects for a given namespace and list options.
+	err := r.List(ctx, lrpdbList, listOpts...)
+	if err != nil {
+		log.Info("Failed to list lrpdbs", "Namespace", req.Namespace, "Error", err)
+		return err
+	}
+
+	if len(lrpdbList.Items) == 0 {
+		log.Info("No lrpdbs found for LRPDBName: "+lrpdbResName, "CDBResName", lrestResName)
+		return nil
+	}
+
+	for _, p := range lrpdbList.Items {
+		log.Info("Found LRPDB: " + p.Name)
+		// A different CR (p.Name != lrpdb.Name) on the same LREST resource
+		// managing the same PDB name is a duplicate.
+		if (p.Name != lrpdb.Name) && (p.Spec.CDBResName == lrestResName) {
+			log.Info("Duplicate LRPDB found")
+			lrpdb.Status.Msg = "LRPDB Resource already exists"
+			lrpdb.Status.Status = false
+			lrpdb.Status.Phase = lrpdbPhaseFail
+			return errors.New("Duplicate LRPDB found")
+		}
+	}
+	return nil
+}
+
+/*
+***************************************************************
+ - Get the Custom Resource for the LREST mentioned in the LRPDB Spec
+ /**************************************************************
+*/
+func (r *LRPDBReconciler) getLRESTResource(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (dbapi.LREST, error) {
+
+	log := r.Log.WithValues("getLRESTResource", req.NamespacedName)
+
+	var lrest dbapi.LREST // LREST CR corresponding to the LREST name specified in the LRPDB spec
+
+	// Name of the LREST CR that holds the LREST container
+	lrestResName := lrpdb.Spec.CDBResName
+	lrestNamespace :=
lrpdb.Spec.CDBNamespace
+
+	log.Info("lrestResName...........:" + lrestResName)
+	log.Info("lrestNamespace.........:" + lrestNamespace)
+
+	// Get LREST CR corresponding to the LREST name specified in the LRPDB spec.
+	// Fix: use the reconcile ctx instead of context.Background() so the read
+	// honours controller cancellation and timeouts.
+	err := r.Get(ctx, client.ObjectKey{
+		Namespace: lrestNamespace,
+		Name:      lrestResName,
+	}, &lrest)
+
+	if err != nil {
+		log.Info("Failed to get CRD for LREST", "Name", lrestResName, "Namespace", lrestNamespace, "Error", err.Error())
+		lrpdb.Status.Msg = "Unable to get CRD for LREST : " + lrestResName
+		r.Status().Update(ctx, lrpdb)
+		return lrest, err
+	}
+
+	log.Info("Found CR for LREST", "Name", lrestResName, "CR Name", lrest.Name)
+	return lrest, nil
+}
+
+/*
+***************************************************************
+ - Get the LREST Pod for the LREST mentioned in the LRPDB Spec
+ /**************************************************************
+*/
+// getLRESTPod fetches the Pod named "<CDBResName>-lrest" in the request
+// namespace. On failure it only sets Status.Msg; the status update itself
+// is left to the caller.
+func (r *LRPDBReconciler) getLRESTPod(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (corev1.Pod, error) {
+
+	log := r.Log.WithValues("getLRESTPod", req.NamespacedName)
+
+	var lrestPod corev1.Pod // LREST Pod container with connection to the concerned LREST
+
+	// Name of the LREST CR that holds the LREST container
+	lrestResName := lrpdb.Spec.CDBResName
+
+	// Get LREST Pod associated with the LREST Name specified in the LRPDB Spec.
+	// Fix: use the reconcile ctx instead of context.Background().
+	err := r.Get(ctx, client.ObjectKey{
+		Namespace: req.Namespace,
+		Name:      lrestResName + "-lrest",
+	}, &lrestPod)
+
+	if err != nil {
+		log.Info("Failed to get Pod for LREST", "Name", lrestResName, "Namespace", req.Namespace, "Error", err.Error())
+		lrpdb.Status.Msg = "Unable to get LREST Pod for LREST : " + lrestResName
+		return lrestPod, err
+	}
+
+	log.Info("Found LREST Pod for LREST", "Name", lrestResName, "Pod Name", lrestPod.Name, "LREST Container hostname", lrestPod.Spec.Hostname)
+	return lrestPod, nil
+}
+
+/*
+************************************************
+ - Get Secret Key for a Secret Name
+
/***********************************************
+*/
+// getSecret returns the value of keyName from Secret secretName in the
+// LRPDB's namespace. Status.Msg is set (but not persisted) when the Secret
+// does not exist.
+func (r *LRPDBReconciler) getSecret(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, secretName string, keyName string) (string, error) {
+
+	log := r.Log.WithValues("getSecret", req.NamespacedName)
+
+	secret := &corev1.Secret{}
+	err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: lrpdb.Namespace}, secret)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("Secret not found:" + secretName)
+			lrpdb.Status.Msg = "Secret not found:" + secretName
+			return "", err
+		}
+		log.Error(err, "Unable to get the secret.")
+		return "", err
+	}
+
+	return string(secret.Data[keyName]), nil
+}
+
+/*
+************************************************
+ - Issue a REST API Call to the LREST container
+ /***********************************************
+*/
+// callAPI performs an HTTPS request (mutual TLS + basic auth) against the
+// LREST server and returns the raw response body. Oracle errors embedded
+// in the JSON response are surfaced as an error after recording events.
+func (r *LRPDBReconciler) callAPI(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, url string, payload map[string]string, action string) (string, error) {
+	log := r.Log.WithValues("callAPI", req.NamespacedName)
+
+	var err error
+
+	secret := &corev1.Secret{}
+
+	err = r.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsKey.Secret.SecretName, Namespace: lrpdb.Namespace}, secret)
+
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsKey.Secret.SecretName)
+			return "", err
+		}
+		log.Error(err, "Unable to get the secret.")
+		return "", err
+	}
+	rsaKeyPEM := secret.Data[lrpdb.Spec.LRPDBTlsKey.Secret.Key]
+
+	err = r.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName, Namespace: lrpdb.Namespace}, secret)
+
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName)
+			return "", err
+		}
+		log.Error(err, "Unable to get the secret.")
+		return "", err
+	}
+
+	rsaCertPEM := secret.Data[lrpdb.Spec.LRPDBTlsCrt.Secret.Key]
+
+	err = r.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsCat.Secret.SecretName, Namespace: lrpdb.Namespace}, secret)
+
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCat.Secret.SecretName)
+			return "", err
+		}
+		log.Error(err, "Unable to get the secret.")
+		return "", err
+	}
+
+	caCert := secret.Data[lrpdb.Spec.LRPDBTlsCat.Secret.Key]
+	/*
+		r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaKeyPEM))
+		r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaCertPEM))
+		r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(caCert))
+	*/
+
+	certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM))
+	if err != nil {
+		lrpdb.Status.Msg = "Error tls.X509KeyPair"
+		return "", err
+	}
+
+	caCertPool := x509.NewCertPool()
+	caCertPool.AppendCertsFromPEM(caCert)
+	/*
+		tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate},
+			RootCAs: caCertPool}
+	*/
+	tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate},
+		RootCAs: caCertPool,
+		//MinVersion: tls.VersionTLS12,
+		CurvePreferences:         []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
+		PreferServerCipherSuites: true,
+		CipherSuites: []uint16{
+			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+			tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+			tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+		},
+	}
+
+	tr := &http.Transport{TLSClientConfig: tlsConf}
+
+	httpclient := &http.Client{Transport: tr}
+
+	log.Info("Issuing REST call", "URL", url, "Action", action)
+
+	webUser, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.WebLrpdbServerUser.Secret.SecretName, lrpdb.Spec.WebLrpdbServerUser.Secret.Key, lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key)
+	if err != nil {
+		log.Error(err, "Unable to get webuser account name ")
+		return "", err
+	}
+
+	webUserPwd, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.WebLrpdbServerPwd.Secret.SecretName, lrpdb.Spec.WebLrpdbServerPwd.Secret.Key, lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key)
+	if err != nil {
+		log.Error(err, "Unable to get webuser account password ")
+		return "", err
+	}
+
+	var httpreq *http.Request
+	if action == "GET" {
+		httpreq, err = http.NewRequest(action, url, nil)
+	} else {
+		jsonValue, _ := json.Marshal(payload)
+		httpreq, err = http.NewRequest(action, url, bytes.NewBuffer(jsonValue))
+	}
+
+	if err != nil {
+		log.Info("Unable to create HTTP Request for LRPDB : "+lrpdb.Name, "err", err.Error())
+		return "", err
+	}
+
+	httpreq.Header.Add("Accept", "application/json")
+	httpreq.Header.Add("Content-Type", "application/json")
+	httpreq.SetBasicAuth(webUser, webUserPwd)
+
+	resp, err := httpclient.Do(httpreq)
+	if err != nil {
+		errmsg := err.Error()
+		log.Error(err, "Failed - Could not connect to LREST Pod", "err", err.Error())
+		lrpdb.Status.Msg = "Error: Could not connect to LREST Pod"
+		r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", errmsg)
+		return "", err
+	}
+	// Fix: close the body on every path. The defer was previously placed
+	// after the non-OK status branch, leaking the response body (and the
+	// underlying connection) whenever LREST returned an error status.
+	defer resp.Body.Close()
+
+	r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "Done", lrpdb.Spec.CDBResName)
+	if resp.StatusCode != http.StatusOK {
+		bb, _ := ioutil.ReadAll(resp.Body)
+
+		if resp.StatusCode == 404 {
+			lrpdb.Status.ConnString = ""
+			lrpdb.Status.Msg = lrpdb.Spec.LRPDBName + " not found"
+
+		} else {
+			if !flood_control {
+				lrpdb.Status.Msg = "LREST Error - HTTP Status Code:" + strconv.Itoa(resp.StatusCode)
+			}
+		}
+
+		if !flood_control {
+			log.Info("LREST Error - HTTP Status Code :"+strconv.Itoa(resp.StatusCode), "Err", string(bb))
+		}
+
+		var apiErr LRESTError
+		json.Unmarshal([]byte(bb), &apiErr)
+		if !flood_control {
+			r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", "Failed: %s", apiErr.Message)
+		}
+		fmt.Printf("\n================== APIERR ======================\n")
+		fmt.Printf("%+v \n", apiErr)
+		// Fix: never pass a non-constant string as the format argument
+		// (go vet printf; a '%' in the body would be misinterpreted).
+		fmt.Printf("%s", string(bb))
+		fmt.Printf("URL=%s\n", url)
+		fmt.Printf("resp.StatusCode=%s\n", strconv.Itoa(resp.StatusCode))
+		fmt.Printf("\n================== APIERR ======================\n")
+		flood_control = true
+		return "", errors.New("LREST Error")
+	}
+	flood_control = false
+
+	bodyBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		fmt.Print(err.Error())
+	}
+	respData := string(bodyBytes)
+	fmt.Print("CALL API return msg.....:")
+	fmt.Println(string(bodyBytes))
+
+	var apiResponse restSQLCollection
+	json.Unmarshal([]byte(bodyBytes), &apiResponse)
+	fmt.Printf("===> %#v\n", apiResponse)
+	fmt.Printf("===> %+v\n", apiResponse)
+
+	errFound := false
+	for _, sqlItem := range apiResponse.Items {
+		if sqlItem.ErrorDetails != "" {
+			log.Info("LREST Error - Oracle Error Code :" + strconv.Itoa(sqlItem.ErrorCode))
+			if !errFound {
+				lrpdb.Status.Msg = sqlItem.ErrorDetails
+			}
+			r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "OraError", "%s", sqlItem.ErrorDetails)
+			errFound = true
+		}
+	}
+
+	if errFound {
+		return "", errors.New("Oracle Error")
+	}
+
+	return respData, nil
+}
+
+/*
+************************************************
+ - Create a LRPDB
+
+***********************************************
+*/
+// createLRPDB creates a new PDB through the LREST server after verifying
+// the PDB does not already exist; on success it records the connect string
+// and applies the init config map.
+func (r *LRPDBReconciler) createLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error {
+
+	log := r.Log.WithValues("createLRPDB", req.NamespacedName)
+
+	var err error
+	var tde_Password string
+	var tde_Secret string
+
+	log.Info("call getLRESTResource \n")
+	lrest, err := r.getLRESTResource(ctx, req, lrpdb)
+	if err != nil {
+		return err
+	}
+
+	lrpdbAdminName, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.AdminpdbUser.Secret.SecretName, lrpdb.Spec.AdminpdbUser.Secret.Key, lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key)
+	if err != nil {
+		log.Error(err, "Unable to find pdb admin user ")
+		return err
+	}
+
+	lrpdbAdminPwd, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.AdminpdbPass.Secret.SecretName, lrpdb.Spec.AdminpdbPass.Secret.Key,
lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key)
+
+	if err != nil {
+		log.Error(err, "Unable to find pdb admin password ")
+		return err
+	}
+
+	// A successful state probe means the PDB already exists: fail the CR.
+	err = r.getLRPDBState(ctx, req, lrpdb)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("Check LRPDB not existence completed", "LRPDB Name", lrpdb.Spec.LRPDBName)
+		}
+
+	} else {
+
+		lrpdb.Status.Phase = lrpdbPhaseFail
+		lrpdb.Status.Msg = "PDB " + lrpdb.Spec.LRPDBName + " already exists "
+		if err := r.Status().Update(ctx, lrpdb); err != nil {
+			log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error())
+		}
+		log.Info("Database already exists ", "LRPDB Name", lrpdb.Spec.LRPDBName)
+		err := fmt.Errorf("%v", 65012)
+		return err
+	}
+
+	values := map[string]string{
+		"method":              "CREATE",
+		"pdb_name":            lrpdb.Spec.LRPDBName,
+		"adminName":           lrpdbAdminName,
+		"adminPwd":            lrpdbAdminPwd,
+		"fileNameConversions": lrpdb.Spec.FileNameConversions,
+		"reuseTempFile":       strconv.FormatBool(*(lrpdb.Spec.ReuseTempFile)),
+		"unlimitedStorage":    strconv.FormatBool(*(lrpdb.Spec.UnlimitedStorage)),
+		"totalSize":           lrpdb.Spec.TotalSize,
+		"tempSize":            lrpdb.Spec.TempSize,
+		"getScript":           strconv.FormatBool(*(lrpdb.Spec.GetScript))}
+
+	fmt.Printf("===== PAYLOAD ===\n")
+	fmt.Print(" method ", values["method"], "\n")
+	fmt.Print(" pdb_name ", values["pdb_name"], "\n")
+	fmt.Print(" adminName ", values["adminName"], "\n")
+	fmt.Print(" adminPwd --------------\n")
+	fmt.Print(" fileNameConversions ", values["fileNameConversions"], "\n")
+	fmt.Print(" unlimitedStorage ", values["unlimitedStorage"], "\n")
+	fmt.Print(" reuseTempFile ", values["reuseTempFile"], "\n")
+	fmt.Print(" tempSize ", values["tempSize"], "\n")
+	fmt.Print(" totalSize ", values["totalSize"], "\n")
+	fmt.Print(" getScript ", values["getScript"], "\n")
+
+	if *(lrpdb.Spec.LTDEImport) {
+		tde_Password, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDEPassword.Secret.SecretName, lrpdb.Spec.LTDEPassword.Secret.Key)
+		if err != nil {
+			return err
+		}
+		tde_Secret, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDESecret.Secret.SecretName, lrpdb.Spec.LTDESecret.Secret.Key)
+		if err != nil {
+			return err
+		}
+
+		// Strip the trailing newline from each secret value.
+		// Fix: tde_Password was rebuilt from tde_Secret (copy/paste error);
+		// trim the password from itself, matching the tde_Secret line above.
+		tde_Secret = tde_Secret[:len(tde_Secret)-1]
+		tde_Password = tde_Password[:len(tde_Password)-1]
+		values["tde_Password"] = tde_Password
+		values["tdeKeystorePath"] = lrpdb.Spec.LTDEKeystorePath
+		values["tde_Secret"] = tde_Secret
+	}
+
+	//url := "https://" + lrpdb.Spec.CDBResName + "-lrest:" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/"
+	url := r.BaseUrl(ctx, req, lrpdb, lrest)
+	fmt.Print("============================================================\n")
+	fmt.Print(url)
+	fmt.Print("\n============================================================\n")
+	lrpdb.Status.TotalSize = lrpdb.Spec.TotalSize
+	lrpdb.Status.Phase = lrpdbPhaseCreate
+	lrpdb.Status.Msg = "Waiting for LRPDB to be created"
+
+	if err := r.Status().Update(ctx, lrpdb); err != nil {
+		log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error())
+	}
+
+	respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST")
+	if err != nil {
+		log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error())
+		return err
+	}
+
+	r.GetSqlCode(respData, &(lrpdb.Status.SqlCode))
+	globalsqlcode = lrpdb.Status.SqlCode
+	if lrpdb.Status.SqlCode != 0 {
+		err := fmt.Errorf("%v", lrpdb.Status.SqlCode)
+		return err
+	}
+
+	r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal,
+		"Created", "LRPDB '%s' created successfully", lrpdb.Spec.LRPDBName)
+
+	if lrest.Spec.DBServer != "" {
+		lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName
+	} else {
+		log.Info("Parsing connectstring")
+		lrpdb.Status.ConnString = lrest.Spec.DBTnsurl
+		parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName))
+	}
+
+	assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion
+	if lrpdb.Spec.AssertiveLrpdbDeletion == true {
+		r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Created", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName)
+	}
+
+	r.getLRPDBState(ctx, req, lrpdb)
+	log.Info("Created LRPDB Resource", "LRPDB Name", lrpdb.Spec.LRPDBName)
+
+	if bit(lrpdb.Status.Bitstat, MPINIT) == false {
+		r.InitConfigMap(ctx, req, lrpdb)
+		Cardinality, _ := r.ApplyConfigMap(ctx, req, lrpdb)
+		log.Info("Config Map Cardinality " + strconv.Itoa(int(Cardinality)))
+	}
+
+	if err := r.Status().Update(ctx, lrpdb); err != nil {
+		log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error())
+	}
+
+	return nil
+}
+
+/*
+************************************************
+ - Clone a LRPDB
+ /***********************************************
+*/
+// cloneLRPDB clones Spec.SrcLRPDBName into Spec.LRPDBName via LREST after
+// verifying the clone source exists and is open.
+func (r *LRPDBReconciler) cloneLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error {
+
+	if lrpdb.Spec.LRPDBName == lrpdb.Spec.SrcLRPDBName {
+		return nil
+	}
+
+	log := r.Log.WithValues("cloneLRPDB", req.NamespacedName)
+
+	globalsqlcode = 0
+	var err error
+
+	lrest, err := r.getLRESTResource(ctx, req, lrpdb)
+	if err != nil {
+		return err
+	}
+
+	/* Prevent cloning an existing lrpdb */
+	err = r.getLRPDBState(ctx, req, lrpdb)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("Check LRPDB not existence completed", "LRPDB Name", lrpdb.Spec.LRPDBName)
+		}
+
+	} else {
+		log.Info("Database already exists ", "LRPDB Name", lrpdb.Spec.LRPDBName)
+		return nil
+	}
+
+	values := map[string]string{
+		"method":           "CLONE",
+		"pdb_name":         lrpdb.Spec.LRPDBName,
+		"srcPdbName":       lrpdb.Spec.SrcLRPDBName,
+		"reuseTempFile":    strconv.FormatBool(*(lrpdb.Spec.ReuseTempFile)),
+		"unlimitedStorage": strconv.FormatBool(*(lrpdb.Spec.UnlimitedStorage)),
+		"getScript":        strconv.FormatBool(*(lrpdb.Spec.GetScript))}
+
+	//* check the existence of lrpdb.Spec.SrcLRPDBName //
+	var allErrs field.ErrorList
+	pdbCounter, _ := r.checkPDBforCloninig(ctx, req, lrpdb, lrpdb.Spec.SrcLRPDBName)
+	if pdbCounter == 0 {
+		log.Info("target pdb " + lrpdb.Spec.SrcLRPDBName
+ " does not exists or is not open") + allErrs = append(allErrs, field.NotFound(field.NewPath("Spec").Child("LRPDBName"), " "+lrpdb.Spec.LRPDBName+" does not exist : failure")) + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + + if lrpdb.Spec.SparseClonePath != "" { + values["sparseClonePath"] = lrpdb.Spec.SparseClonePath + } + if lrpdb.Spec.FileNameConversions != "" { + values["fileNameConversions"] = lrpdb.Spec.FileNameConversions + } + if lrpdb.Spec.TotalSize != "" { + values["totalSize"] = lrpdb.Spec.TotalSize + } + if lrpdb.Spec.TempSize != "" { + values["tempSize"] = lrpdb.Spec.TempSize + } + + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdb.Spec.LRPDBName + "/" + + lrpdb.Status.Phase = lrpdbPhaseClone + lrpdb.Status.Msg = "Waiting for LRPDB to be cloned" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode != 0 { + errclone := errors.New("Cannot clone database: ora-" + strconv.Itoa(lrpdb.Status.SqlCode)) + log.Info("Cannot clone database ora-" + strconv.Itoa(lrpdb.Status.SqlCode)) + lrpdb.Status.Msg = lrpdb.Spec.SrcLRPDBName + " is open in mount cannot clone " + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + return errclone + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Created", "LRPDB '%s' cloned successfully", lrpdb.Spec.LRPDBName) + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + lrpdb.Status.ConnString = 
lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + + } + assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion + if lrpdb.Spec.AssertiveLrpdbDeletion == true { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Clone", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName) + } + + log.Info("Cloned LRPDB successfully", "Source LRPDB Name", lrpdb.Spec.SrcLRPDBName, "Clone LRPDB Name", lrpdb.Spec.LRPDBName) + r.getLRPDBState(ctx, req, lrpdb) + return nil +} + +/* +************************************************ + - Plug a LRPDB + +*********************************************** +*/ +func (r *LRPDBReconciler) plugLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("plugLRPDB", req.NamespacedName) + globalsqlcode = 0 + + var err error + var tde_Password string + var tde_Secret string + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + values := map[string]string{ + "method": "PLUG", + "xmlFileName": lrpdb.Spec.XMLFileName, + "pdb_name": lrpdb.Spec.LRPDBName, + "sourceFileNameConversions": lrpdb.Spec.SourceFileNameConversions, + "copyAction": lrpdb.Spec.CopyAction, + "fileNameConversions": lrpdb.Spec.FileNameConversions, + "unlimitedStorage": strconv.FormatBool(*(lrpdb.Spec.UnlimitedStorage)), + "reuseTempFile": strconv.FormatBool(*(lrpdb.Spec.ReuseTempFile)), + "totalSize": lrpdb.Spec.TotalSize, + "tempSize": lrpdb.Spec.TempSize, + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + + if *(lrpdb.Spec.LTDEImport) { + tde_Password, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDEPassword.Secret.SecretName, lrpdb.Spec.LTDEPassword.Secret.Key) + if err != nil { + return err + } + tde_Secret, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDESecret.Secret.SecretName, lrpdb.Spec.LTDESecret.Secret.Key) + if err != nil { + return err + } + + tde_Secret = tde_Secret[:len(tde_Secret)-1] + tde_Password = 
tde_Secret[:len(tde_Password)-1] + values["tde_Password"] = tde_Password + values["tdeKeystorePath"] = lrpdb.Spec.LTDEKeystorePath + values["tde_Secret"] = tde_Secret + values["tdeImport"] = strconv.FormatBool(*(lrpdb.Spec.LTDEImport)) + } + if *(lrpdb.Spec.AsClone) { + values["asClone"] = strconv.FormatBool(*(lrpdb.Spec.AsClone)) + } + + url := r.BaseUrl(ctx, req, lrpdb, lrest) + + lrpdb.Status.TotalSize = lrpdb.Spec.TotalSize + lrpdb.Status.Phase = lrpdbPhasePlug + lrpdb.Status.Msg = "Waiting for LRPDB to be plugged" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode != 0 { + log.Info("Plug database failure........:" + strconv.Itoa(lrpdb.Status.SqlCode)) + err = fmt.Errorf("%v", lrpdb.Status.SqlCode) + return err + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Created", "LRPDB '%s' plugged successfully", lrpdb.Spec.LRPDBName) + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + log.Info("Parsing connectstring") + lrpdb.Status.ConnString = lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + } + + assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion + if lrpdb.Spec.AssertiveLrpdbDeletion == true { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Plug", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName) + } + + log.Info("Successfully plugged LRPDB", "LRPDB Name", lrpdb.Spec.LRPDBName) + r.getLRPDBState(ctx, req, lrpdb) + return nil +} + +/* +************************************************ + - Unplug a 
LRPDB
+
+***********************************************
+*/
+// unplugLRPDB unplugs the PDB to Spec.XMLFileName via LREST; on success it
+// removes the finalizer and deletes the LRPDB resource itself.
+func (r *LRPDBReconciler) unplugLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error {
+
+	log := r.Log.WithValues("unplugLRPDB", req.NamespacedName)
+	globalsqlcode = 0
+
+	var err error
+	var tde_Password string
+	var tde_Secret string
+
+	lrest, err := r.getLRESTResource(ctx, req, lrpdb)
+	if err != nil {
+		return err
+	}
+
+	values := map[string]string{
+		"method":      "UNPLUG",
+		"xmlFileName": lrpdb.Spec.XMLFileName,
+		"getScript":   strconv.FormatBool(*(lrpdb.Spec.GetScript))}
+
+	if *(lrpdb.Spec.LTDEExport) {
+		// Get the TDE Password
+		tde_Password, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDEPassword.Secret.SecretName, lrpdb.Spec.LTDEPassword.Secret.Key)
+		if err != nil {
+			return err
+		}
+		tde_Secret, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDESecret.Secret.SecretName, lrpdb.Spec.LTDESecret.Secret.Key)
+		if err != nil {
+			return err
+		}
+
+		// Strip the trailing newline from each secret value.
+		// Fix: tde_Password was rebuilt from tde_Secret (copy/paste error);
+		// trim the password from itself, matching the tde_Secret line above.
+		tde_Secret = tde_Secret[:len(tde_Secret)-1]
+		tde_Password = tde_Password[:len(tde_Password)-1]
+		values["tde_Password"] = tde_Password
+		values["tdeKeystorePath"] = lrpdb.Spec.LTDEKeystorePath
+		values["tde_Secret"] = tde_Secret
+		values["tdeExport"] = strconv.FormatBool(*(lrpdb.Spec.LTDEExport))
+	}
+
+	url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdb.Spec.LRPDBName + "/"
+
+	log.Info("CallAPI(url)", "url", url)
+	lrpdb.Status.Phase = lrpdbPhaseUnplug
+	lrpdb.Status.Msg = "Waiting for LRPDB to be unplugged"
+	if err := r.Status().Update(ctx, lrpdb); err != nil {
+		log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error())
+	}
+	respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST")
+	if err != nil {
+		log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error())
+		return err
+	}
+
+	r.GetSqlCode(respData, &(lrpdb.Status.SqlCode))
+
+	if lrpdb.Status.SqlCode != 0 {
+		globalsqlcode = lrpdb.Status.SqlCode
+
+		lrpdb.Status.Msg = lrpdb.Spec.LRPDBName + " database cannot be unplugged "
+		log.Info(lrpdb.Spec.LRPDBName + " database cannot be unplugged ")
+		// ORA-65170: the target XML file already exists on the server.
+		if lrpdb.Status.SqlCode == 65170 {
+			log.Info(lrpdb.Spec.XMLFileName + " xml file already exists ")
+		}
+
+		/*
+			err := r.Update(ctx, lrpdb)
+			if err != nil {
+				log.Info("Fail to update crd", "err", err.Error())
+				return err
+			}
+
+			if err := r.Status().Update(ctx, lrpdb); err != nil {
+				log.Error(err, "Failed to update status"+lrpdb.Name, "err", err.Error())
+				return err
+			}
+		*/
+
+		r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Unplugged", " ORA-%s ", strconv.Itoa(lrpdb.Status.SqlCode))
+		err = fmt.Errorf("%v", lrpdb.Status.SqlCode)
+		return err
+	}
+
+	// Unplug succeeded: drop the finalizer and delete the CR itself.
+	if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) {
+		log.Info("Removing finalizer")
+		controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer)
+		err = r.Update(ctx, lrpdb)
+		if err != nil {
+			log.Info("Could not remove finalizer", "err", err.Error())
+			return err
+		}
+		lrpdb.Status.Status = true
+		err = r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1))
+		if err != nil {
+			log.Info("Could not delete LRPDB resource", "err", err.Error())
+			return err
+		}
+	}
+
+	r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Unplugged", "LRPDB '%s' unplugged successfully", lrpdb.Spec.LRPDBName)
+	globalsqlcode = 0
+	log.Info("Successfully unplugged LRPDB resource")
+	return nil
+}
+
+/**************************************************
+Alter system LRPDB
+**************************************************/
+
+/**just push the trasnsaction **/
+func (r *LRPDBReconciler) alterSystemLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error {
+
+	log := r.Log.WithValues("alterSystemLRPDB", req.NamespacedName)
+	globalsqlcode = 0
+
+	var err error
+	err = r.getLRPDBState(ctx, req, lrpdb)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("Warning LRPDB does not exist", "LRPDB Name", lrpdb.Spec.LRPDBName)
+			return nil
+		}
+		return err
+	}
+
+	lrest, err := r.getLRESTResource(ctx, req, lrpdb)
+	if err != nil {
+
log.Info("Cannot find LREST server") + return err + } + + /* alter system payload */ + + values := map[string]string{ + "state": "ALTER", + "alterSystemParameter": lrpdb.Spec.AlterSystemParameter, + "alterSystemValue": lrpdb.Spec.AlterSystemValue, + "parameterScope": lrpdb.Spec.ParameterScope, + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + log.Info("alter system payload...:", "lrpdb.Spec.AlterSystemValue=", lrpdb.Spec.AlterSystemValue) + log.Info("alter system payload...:", "lrpdb.Spec.AlterSystemParameter=", lrpdb.Spec.AlterSystemParameter) + log.Info("alter system payload...:", "lrpdb.Spec.ParameterScope=", lrpdb.Spec.ParameterScope) + log.Info("alter system path.......:", "url=", url) + + lrpdb.Status.Phase = lrpdbPhaseAlterPlug + lrpdb.Status.ModifyOption = lrpdb.Spec.AlterSystem + " " + lrpdb.Spec.ParameterScope + lrpdb.Status.Msg = "Waiting for LRPDB parameter to be modified" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", err.Error()) + return err + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode == 0 { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Altered", "LRPDB(name,cmd,sqlcode) '%s %s %d' ", lrpdb.Spec.LRPDBName, lrpdb.Spec.AlterSystem, lrpdb.Status.SqlCode) + lrpdb.Status.Phase = lrpdbPhaseReady + lrpdb.Spec.Action = "Noaction" + lrpdb.Status.Action = "Noaction" + lrpdb.Status.Status = true + + if err := r.Update(ctx, lrpdb); err != nil { + log.Error(err, "Cannot rest lrpdb Spec :"+lrpdb.Name, "err", err.Error()) + return err + } + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", 
err.Error()) + return err + } + return nil + + } + + if lrpdb.Status.SqlCode != 0 { + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "alter system failure", "LRPDB(name,cmd,sqlcode) '%s %s %d' ", lrpdb.Spec.LRPDBName, lrpdb.Spec.AlterSystem, lrpdb.Status.SqlCode) + erralter := errors.New("Error: cannot modify parameter") + + lrpdb.Status.ModifyOption = lrpdb.Spec.AlterSystem + " " + lrpdb.Spec.ParameterScope + lrpdb.Status.Msg = "Failed: cannot modify system parameter" + lrpdb.Status.Phase = lrpdbPhaseStatus + lrpdb.Spec.AlterSystem = "" + lrpdb.Spec.ParameterScope = "" + lrpdb.Spec.Action = "Noaction" + if err := r.Update(ctx, lrpdb); err != nil { + log.Error(err, "Cannot rest lrpdb Spec :"+lrpdb.Name, "err", err.Error()) + return err + } + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", err.Error()) + return err + } + return erralter + } + + lrpdb.Status.Status = false + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", err.Error()) + return err + } + return nil +} + +/************************************************* + * Modify a LRPDB state + ***********************************************/ +func (r *LRPDBReconciler) modifyLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("modifyLRPDB", req.NamespacedName) + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Modify", "Info:'%s %s %s' ", lrpdb.Spec.LRPDBName, lrpdb.Spec.LRPDBState, lrpdb.Status.ModifyOption) + + var err error + err = r.getLRPDBState(ctx, req, lrpdb) + if err != nil { + if lrpdb.Status.SqlCode == 1403 { + // BUG 36752465 + // We have to handle to verify a non existings results using both + log.Info("Database does not exists ") + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + if apierrors.IsNotFound(err) { + log.Info("Warning LRPDB does not exist", 
"LRPDB Name", lrpdb.Spec.LRPDBName) + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + return err + } + + /* This scenario is managed by webhook acceptance test ... leave it here anyway */ + if lrpdb.Status.OpenMode == "READ WRITE" && lrpdb.Spec.LRPDBState == "OPEN" && lrpdb.Spec.ModifyOption == "READ WRITE" { + /* Database is already open no action required */ + return nil + } + + if lrpdb.Status.OpenMode == "MOUNTED" && lrpdb.Spec.LRPDBState == "CLOSE" && lrpdb.Spec.ModifyOption == "IMMEDIATE" { + /* Database is already close no action required */ + return nil + } + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + values := map[string]string{} + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + values = map[string]string{ + "state": lrpdb.Spec.LRPDBState, + "modifyOption": lrpdb.Spec.ModifyOption, + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + log.Info("MODIFY LRPDB", "lrpdb.Spec.LRPDBState=", lrpdb.Spec.LRPDBState, "lrpdb.Spec.ModifyOption=", lrpdb.Spec.ModifyOption) + log.Info("LRPDB STATUS OPENMODE", "lrpdb.Status.OpenMode=", lrpdb.Status.OpenMode) + } + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/status/" + + lrpdb.Status.Phase = lrpdbPhaseModify + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + lrpdb.Status.ModifyOption = lrpdb.Spec.LRPDBState + "-" + lrpdb.Spec.ModifyOption + } + + lrpdb.Status.Msg = "Waiting for LRPDB to be modified" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, 
&(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Modified", " '%s' modified successfully '%s'", lrpdb.Spec.LRPDBName, lrpdb.Spec.LRPDBState) + } + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + lrpdb.Status.ConnString = lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + + } + + lrpdb.Status.Msg = "alter lrpdb completed" + lrpdb.Status.Status = false + lrpdb.Status.Phase = lrpdbPhaseReady + + log.Info("Successfully modified LRPDB state", "LRPDB Name", lrpdb.Spec.LRPDBName) + + /* After database openining we reapply the config map if warning is present */ + if lrpdb.Spec.LRPDBState == "OPEN" { + if bit(lrpdb.Status.Bitstat, MPWARN|MPINIT) { + log.Info("re-apply config map") + r.ApplyConfigMap(ctx, req, lrpdb) + + } + } + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + //r.getLRPDBState(ctx, req, lrpdb) + return nil +} + +/* +************************************************ + - Get LRPDB State + /*********************************************** +*/ +func (r *LRPDBReconciler) getLRPDBState(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("getLRPDBState", req.NamespacedName) + + var err error + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/status/" + + lrpdb.Status.Msg = "Getting LRPDB state" + fmt.Print("============================\n") + fmt.Println(lrpdb.Status) + fmt.Print("============================\n") + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update 
status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, nil, "GET") + if err != nil { + log.Info("Begin respData") + log.Info(respData) + log.Info("End respData") + lrpdb.Status.Msg = "getLRPDBState failure : check lrpdb status" + lrpdb.Status.Status = false + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode == 1403 { + lrpdb.Status.OpenMode = "unknown" + lrpdb.Status.Msg = "check lrpdb status" + lrpdb.Status.Status = false + return errors.New("NO_DATA_FOUND") + } + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "Failed to get state of LRPDB :"+lrpdbName, "err", err.Error()) + } + lrpdb.Status.OpenMode = objmap["open_mode"].(string) + + /* if lrpdb.Status.Phase == lrpdbPhaseCreate && sqlcode == 1403 { + + if lrpdb.Status.OpenMode == "READ WRITE" { + err := r.mapLRPDB(ctx, req, lrpdb) + if err != nil { + log.Info("Fail to Map resource getting LRPDB state") + } + } + + if lrpdb.Status.OpenMode == "MOUNTED" { + err := r.mapLRPDB(ctx, req, lrpdb) + if err != nil { + log.Info("Fail to Map resource getting LRPDB state") + } + } + }*/ + + lrpdb.Status.Msg = "check lrpdb ok" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + log.Info("Successfully obtained LRPDB state", "LRPDB Name", lrpdb.Spec.LRPDBName, "State", objmap["open_mode"].(string)) + return nil +} + +/* +************************************************ + - Map Database LRPDB to Kubernetes LRPDB CR + +/*********************************************** +*/ +func (r *LRPDBReconciler) mapLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("mapLRPDB", req.NamespacedName) + + var err error + + lrest, 
err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + log.Info("callapi get to map lrpdb") + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + log.Info("DEBUG NEW URL " + url) + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, nil, "GET") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "Failed json.Unmarshal :"+lrpdbName, "err", err.Error()) + } + + //fmt.Printf("%+v\n", objmap) + totSizeInBytes := objmap["total_size"].(float64) + totSizeInGB := totSizeInBytes / 1024 / 1024 / 1024 + + lrpdb.Status.OpenMode = objmap["open_mode"].(string) + lrpdb.Status.TotalSize = fmt.Sprintf("%4.2f", totSizeInGB) + "G" + assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion + if lrpdb.Spec.AssertiveLrpdbDeletion == true { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Map", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName) + } + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + lrpdb.Status.ConnString = lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + } + + lrpdb.Status.Phase = lrpdbPhaseReady + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + log.Info("Successfully mapped LRPDB to Kubernetes resource", "LRPDB Name", lrpdb.Spec.LRPDBName) + lrpdb.Status.Status = true + return nil +} + +/* +************************************************ + - Delete a LRPDB + /*********************************************** +*/ +func (r *LRPDBReconciler) deleteLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("deleteLRPDB", 
req.NamespacedName) + + errstate := r.getLRPDBState(ctx, req, lrpdb) + if errstate != nil { + if lrpdb.Status.SqlCode == 1403 { + // BUG 36752336: + log.Info("Database does not exists ") + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + if apierrors.IsNotFound(errstate) { + log.Info("Warning LRPDB does not exist", "LRPDB Name", lrpdb.Spec.LRPDBName) + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + log.Error(errstate, "Failed to update status for :"+lrpdb.Name, "err", errstate.Error()) + return errstate + //* if the pdb does not exists delete the crd *// + + } + + if lrpdb.Status.OpenMode == "READ WRITE" { + + errdel := errors.New("pdb is open cannot delete it") + log.Info("LRPDB is open in read write cannot drop ") + lrpdb.Status.Msg = "LRPDB is open in read write cannot drop " + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + return errdel + } + + err := r.deleteLRPDBInstance(req, ctx, lrpdb) + if err != nil { + log.Info("Could not delete LRPDB", "LRPDB Name", lrpdb.Spec.LRPDBName, "err", err.Error()) + return err + } + + if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer) + err := r.Update(ctx, lrpdb) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + lrpdb.Status.Status = true + err = r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + if err != nil { + log.Info("Could not delete LRPDB resource", "err", err.Error()) + return err + } + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Deleted", "LRPDB '%s' dropped successfully", lrpdb.Spec.LRPDBName) + + log.Info("Successfully deleted LRPDB resource") + return nil +} + +/* +************************************************ + - Check LRPDB deletion + 
/*********************************************** +*/ +func (r *LRPDBReconciler) manageLRPDBDeletion(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + log := r.Log.WithValues("manageLRPDBDeletion", req.NamespacedName) + + // Check if the LRPDB instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. + isLRPDBMarkedToBeDeleted := lrpdb.GetDeletionTimestamp() != nil + if isLRPDBMarkedToBeDeleted { + log.Info("Marked to be deleted") + lrpdb.Status.Phase = lrpdbPhaseDelete + lrpdb.Status.Status = true + r.Status().Update(ctx, lrpdb) + + if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + // Remove LRPDBFinalizer. Once all finalizers have been + // removed, the object will be deleted. + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer) + err := r.Update(ctx, lrpdb) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + log.Info("Successfully removed LRPDB resource") + return nil + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + log.Info("Adding finalizer") + controllerutil.AddFinalizer(lrpdb, LRPDBFinalizer) + err := r.Update(ctx, lrpdb) + if err != nil { + log.Info("Could not add finalizer", "err", err.Error()) + return err + } + lrpdb.Status.Status = false + } + return nil +} + +/* +************************************************ + - Finalization logic for LRPDBFinalizer + +*********************************************** +*/ +func (r *LRPDBReconciler) deleteLRPDBInstance(req ctrl.Request, ctx context.Context, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("deleteLRPDBInstance", req.NamespacedName) + + var err error + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + values := map[string]string{ + "action": "KEEP", + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + + if lrpdb.Spec.DropAction != "" { + 
values["action"] = lrpdb.Spec.DropAction + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/" + + lrpdb.Status.Phase = lrpdbPhaseDelete + lrpdb.Status.Msg = "Waiting for LRPDB to be deleted" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "DELETE") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + log.Info("Successfully dropped LRPDB", "LRPDB Name", lrpdbName) + return nil +} + +/* +*********************************************************** + - SetupWithManager sets up the controller with the Manager + +************************************************************ +*/ +func (r *LRPDBReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.LRPDB{}). + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Ignore updates to CR status in which case metadata.Generation does not change + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + //return !e.DeleteStateUnknown + return false + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). 
+ Complete(r) +} + +/************************************************************* +Enh 35357707 - PROVIDE THE LRPDB TNSALIAS INFORMATION +**************************************************************/ + +func parseTnsAlias(tns *string, lrpdbsrv *string) { + fmt.Printf("Analyzing string [%s]\n", *tns) + fmt.Printf("Relacing srv [%s]\n", *lrpdbsrv) + var swaptns string + + if strings.Contains(strings.ToUpper(*tns), "SERVICE_NAME") == false { + fmt.Print("Cannot generate tns alias for lrpdb") + return + } + + if strings.Contains(strings.ToUpper(*tns), "ORACLE_SID") == true { + fmt.Print("Cannot generate tns alias for lrpdb") + return + } + + swaptns = fmt.Sprintf("SERVICE_NAME=%s", *lrpdbsrv) + tnsreg := regexp.MustCompile(`SERVICE_NAME=\w+`) + *tns = tnsreg.ReplaceAllString(*tns, swaptns) + + fmt.Printf("Newstring [%s]\n", *tns) + +} + +// Compose url +func (r *LRPDBReconciler) BaseUrl(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, lrest dbapi.LREST) string { + log := r.Log.WithValues("BaseUrl", req.NamespacedName) + baseurl := "https://" + lrpdb.Spec.CDBResName + "-lrest." 
+ lrpdb.Spec.CDBNamespace + ":" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/" + log.Info("Baseurl:" + baseurl) + return baseurl +} + +func (r *LRPDBReconciler) DecryptWithPrivKey(Key string, Buffer string, req ctrl.Request) (string, error) { + log := r.Log.WithValues("DecryptWithPrivKey", req.NamespacedName) + Debug := 0 + block, _ := pem.Decode([]byte(Key)) + pkcs8PrivateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + log.Error(err, "Failed to parse private key - "+err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", Key) + fmt.Printf("======================================\n") + } + + encString64, err := base64.StdEncoding.DecodeString(string(Buffer)) + if err != nil { + log.Error(err, "Failed to decode encrypted string to base64 - "+err.Error()) + return "", err + } + + decryptedB, err := rsa.DecryptPKCS1v15(nil, pkcs8PrivateKey.(*rsa.PrivateKey), encString64) + if err != nil { + log.Error(err, "Failed to decrypt string - "+err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("[%s]\n", string(decryptedB)) + } + return strings.TrimSpace(string(decryptedB)), err + +} + +// New function to decrypt credential using private key +func (r *LRPDBReconciler) getEncriptedSecret(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, secretName string, keyName string, secretNamePk string, keyNamePk string) (string, error) { + + log := r.Log.WithValues("getEncriptedSecret", req.NamespacedName) + + log.Info("getEncriptedSecret :" + secretName) + secret1 := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: lrpdb.Namespace}, secret1) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + secretName) + lrpdb.Status.Msg = "Secret not found:" + secretName + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + secret2 := &corev1.Secret{} + err = r.Get(ctx, 
types.NamespacedName{Name: secretNamePk, Namespace: lrpdb.Namespace}, secret2) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + secretNamePk) + lrpdb.Status.Msg = "Secret not found:" + secretNamePk + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + Encval := string(secret1.Data[keyName]) + Encval = strings.TrimSpace(Encval) + + privKey := string(secret2.Data[keyNamePk]) + privKey = strings.TrimSpace(privKey) + + /* Debuug info for dev phase + fmt.Printf("DEBUG Secretename:secretName :%s\n", secretName) + fmt.Printf("DEBUG privKey :%s\n", privKey) + fmt.Printf("DEBUG Encval :%s\n", Encval) + */ + + DecVal, err := r.DecryptWithPrivKey(privKey, Encval, req) + if err != nil { + log.Error(err, "Fail to decrypt secret:"+secretName) + lrpdb.Status.Msg = " Fail to decrypt secret:" + secretName + return "", err + } + return DecVal, nil +} + +func (r *LRPDBReconciler) manageLRPDBDeletion2(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + log := r.Log.WithValues("manageLRPDBDeletion", req.NamespacedName) + if lrpdb.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + controllerutil.AddFinalizer(lrpdb, LRPDBFinalizer) + if err := r.Update(ctx, lrpdb); err != nil { + return err + } + } + } else { + log.Info("Pdb marked to be delted") + if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + if assertiveLpdbDeletion == true { + log.Info("Deleting lrpdb CRD: Assertive approach is turned on ") + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + log.Error(err, "Cannont find cdb resource ", "err", err.Error()) + return err + } + + lrpdbName := lrpdb.Spec.LRPDBName + if lrpdb.Status.OpenMode == "READ WRITE" { + valuesclose := map[string]string{ + "state": "CLOSE", + "modifyOption": "IMMEDIATE", + "getScript": "FALSE"} + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + 
"/status/" + _, errclose := r.callAPI(ctx, req, lrpdb, url, valuesclose, "POST") + if errclose != nil { + log.Info("Warning error closing lrpdb continue anyway") + } + } + + valuesdrop := map[string]string{ + "action": "INCLUDING", + "getScript": "FALSE"} + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/" + + log.Info("Call Delete()") + _, errdelete := r.callAPI(ctx, req, lrpdb, url, valuesdrop, "DELETE") + if errdelete != nil { + log.Error(errdelete, "Fail to delete lrpdb :"+lrpdb.Name, "err", err.Error()) + return errdelete + } + } /* END OF ASSERTIVE SECTION */ + + log.Info("Marked to be deleted") + lrpdb.Status.Phase = lrpdbPhaseDelete + lrpdb.Status.Status = true + r.Status().Update(ctx, lrpdb) + + controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer) + if err := r.Update(ctx, lrpdb); err != nil { + log.Info("Cannot remove finalizer") + return err + } + + } + + return nil + } + + return nil +} + +func (r *LRPDBReconciler) InitConfigMap(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) *corev1.ConfigMap { + log := r.Log.WithValues("InitConfigMap", req.NamespacedName) + log.Info("ConfigMap..............:" + "ConfigMap" + lrpdb.Name) + log.Info("ConfigMap nmsp.........:" + lrpdb.Namespace) + /* + * PDB SYSTEM PARAMETER + * record [name,value=[paramete_val|reset],level=[session|system]] + */ + + if lrpdb.Spec.PDBConfigMap == "" { + /* if users does not specify a config map + we generate an empty new one for possible + future pdb parameter modification */ + + var SystemParameters map[string]string + + log.Info("Generating an empty configmap") + globalconfigmap = "configmap-" + lrpdb.Spec.LRPDBName + "-default" + DbParameters := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "configmap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: globalconfigmap, + Namespace: lrpdb.Namespace, + }, + Data: SystemParameters, + } + + if err := ctrl.SetControllerReference(lrpdb, DbParameters, r.Scheme); err != nil { + log.Error(err, 
"Fail to set SetControllerReference", "err", err.Error()) + return nil + } + + /* Update Spec.PDBConfigMap */ + lrpdb.Spec.PDBConfigMap = "configmap" + lrpdb.Spec.LRPDBName + "default" + if err := r.Update(ctx, lrpdb); err != nil { + log.Error(err, "Failure updating Spec.PDBConfigMap ", "err", err.Error()) + return nil + } + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPEMPT) + return DbParameters + + } else { + + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPINIT) + globalconfigmap = lrpdb.Spec.PDBConfigMap + DbParameters, err := r.GetConfigMap(ctx, req, lrpdb) + if err != nil { + log.Error(err, "Fail to fetch configmap ", "err", err.Error()) + return nil + } + + //ParseConfigMapData(DbParameters) + + return DbParameters + } + + return nil +} + +func (r *LRPDBReconciler) GetConfigMap(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (*corev1.ConfigMap, error) { + log := r.Log.WithValues("GetConfigMap", req.NamespacedName) + log.Info("ConfigMapGlobal.............:" + globalconfigmap) + DbParameters, err := k8s.FetchConfigMap(r.Client, lrpdb.Namespace, globalconfigmap) + if err != nil { + log.Error(err, "Fail to fetch configmap", "err", err.Error()) + return nil, err + } + + return DbParameters, nil +} + +func (r *LRPDBReconciler) ApplyConfigMap(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (int32, error) { + log := r.Log.WithValues("ApplyConfigMap", req.NamespacedName) + /* We read the config map and apply the setting to the pdb */ + + log.Info("Starting Apply Config Map Process") + configmap, err := r.GetConfigMap(ctx, req, lrpdb) + if err != nil { + log.Info("Cannot get config map in the open yaml file") + return 0, nil + } + Cardinality := int32(len(configmap.Data)) + if Cardinality == 0 { + log.Info("Empty config map... 
nothing to do ") + return 0, nil + } + log.Info("GetConfigMap completed") + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + log.Info("Cannot find lrest server") + return 0, nil + } + tokens := lrcommons.ParseConfigMapData(configmap) + for cnt := range tokens { + if len(tokens[cnt]) != 0 { + /* avoid null token and check malformed value */ + fmt.Printf("token=[%s]\n", tokens[cnt]) + Parameter := strings.Split(tokens[cnt], " ") + if len(Parameter) != 3 { + log.Info("WARNING malformed value in the configmap") + } else { + fmt.Printf("alter system set %s=%s scope=%s instances=all\n", Parameter[0], Parameter[1], Parameter[2]) + /* Preparing PayLoad + ----------------- + WARNING: event setting is not yet supported. It will be implemented in future release + */ + AlterSystemPayload := map[string]string{ + "state": "ALTER", + "alterSystemParameter": Parameter[0], + "alterSystemValue": Parameter[1], + "parameterScope": Parameter[2], + } + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdb.Spec.LRPDBName + respData, err := r.callAPI(ctx, req, lrpdb, url, AlterSystemPayload, "POST") + if err != nil { + log.Error(err, "callAPI failure durring Apply Config Map", "err", err.Error()) + return 0, err + } + /* check sql code execution */ + var retJson map[string]interface{} + if err := json.Unmarshal([]byte(respData), &retJson); err != nil { + log.Error(err, "failed to get Data from callAPI", "err", err.Error()) + return 0, err + } + /* We do not the execution if something goes wrong for a single parameter + just report the error in the event queue */ + SqlCode := strconv.Itoa(int(retJson["sqlcode"].(float64))) + AlterMsg := fmt.Sprintf("pdb=%s:%s:%s:%s:%s", lrpdb.Spec.LRPDBName, Parameter[0], Parameter[1], Parameter[2], SqlCode) + log.Info("Config Map Apply:......." 
+ AlterMsg) + + if SqlCode != "0" { + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", AlterMsg) + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPWARN) + } + + } + } + + } + + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPAPPL) + + return Cardinality, nil +} + +func (r *LRPDBReconciler) ManageConfigMapForCloningAndPlugin(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + log := r.Log.WithValues("ManageConfigMapForCloningAndPlugin", req.NamespacedName) + log.Info("Frame:") + /* + If configmap parameter is set and init flag is not set + then we need to iniialized the init mask. This is the case for + pdb generated by clone and plug action + */ + if lrpdb.Spec.Action != "CREATE" && lrpdb.Spec.PDBConfigMap != "" && bit(lrpdb.Status.Bitstat, MPINIT) == false { + if r.InitConfigMap(ctx, req, lrpdb) == nil { + log.Info("Cannot initialize config map for pdb.........:" + lrpdb.Spec.LRPDBName) + return nil + } + log.Info("Call...........:ApplyConfigMap(ctx, req, lrpdb)") + Cardinality, _ := r.ApplyConfigMap(ctx, req, lrpdb) + log.Info("Cardnality:....:" + strconv.Itoa(int(Cardinality))) + if Cardinality == 0 { + return nil + } + + } + return nil +} + +func NewCallLAPI(intr interface{}, ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, url string, payload map[string]string, action string) (string, error) { + var c client.Client + var r logr.Logger + var e record.EventRecorder + var err error + + recpdb, ok1 := intr.(*LRPDBReconciler) + if ok1 { + fmt.Printf("func NewCallLApi ((*PDBReconciler),......)\n") + c = recpdb.Client + e = recpdb.Recorder + r = recpdb.Log + } + + reccdb, ok2 := intr.(*LRESTReconciler) + if ok2 { + fmt.Printf("func NewCallLApi ((*CDBReconciler),......)\n") + c = reccdb.Client + e = reccdb.Recorder + r = reccdb.Log + } + + log := r.WithValues("NewCallLAPI", req.NamespacedName) + + secret := &corev1.Secret{} + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsKey.Secret.SecretName, 
Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + rsaKeyPEM := secret.Data[lrpdb.Spec.LRPDBTlsKey.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + rsaCertPEM := secret.Data[lrpdb.Spec.LRPDBTlsCrt.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsCat.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCat.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + caCert := secret.Data[lrpdb.Spec.LRPDBTlsCat.Secret.Key] + /* + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaKeyPEM)) + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaCertPEM)) + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(caCert)) + */ + + certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM)) + if err != nil { + lrpdb.Status.Msg = "Error tls.X509KeyPair" + return "", err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + /* + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, + RootCAs: caCertPool} + */ + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, + RootCAs: caCertPool, + //MinVersion: tls.VersionTLS12, + CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ 
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + tr := &http.Transport{TLSClientConfig: tlsConf} + + httpclient := &http.Client{Transport: tr} + + log.Info("Issuing REST call", "URL", url, "Action", action) + + // Get Web Server User + //secret := &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.WebLrpdbServerUser.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.WebLrpdbServerUser.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + webUserEnc := string(secret.Data[lrpdb.Spec.WebLrpdbServerUser.Secret.Key]) + webUserEnc = strings.TrimSpace(webUserEnc) + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBPriKey.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBPriKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + privKey := string(secret.Data[lrpdb.Spec.LRPDBPriKey.Secret.Key]) + webUser, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserEnc, req) + + // Get Web Server User Password + secret = &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.WebLrpdbServerPwd.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.WebLrpdbServerPwd.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + webUserPwdEnc := string(secret.Data[lrpdb.Spec.WebLrpdbServerPwd.Secret.Key]) + webUserPwdEnc = strings.TrimSpace(webUserPwdEnc) + webUserPwd, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserPwdEnc, req) + + var 
httpreq *http.Request + if action == "GET" { + httpreq, err = http.NewRequest(action, url, nil) + } else { + jsonValue, _ := json.Marshal(payload) + httpreq, err = http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + } + + if err != nil { + log.Info("Unable to create HTTP Request for LRPDB : "+lrpdb.Name, "err", err.Error()) + return "", err + } + + httpreq.Header.Add("Accept", "application/json") + httpreq.Header.Add("Content-Type", "application/json") + httpreq.SetBasicAuth(webUser, webUserPwd) + + resp, err := httpclient.Do(httpreq) + if err != nil { + errmsg := err.Error() + log.Error(err, "Failed - Could not connect to LREST Pod", "err", err.Error()) + lrpdb.Status.Msg = "Error: Could not connect to LREST Pod" + e.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", errmsg) + return "", err + } + + e.Eventf(lrpdb, corev1.EventTypeWarning, "Done", lrpdb.Spec.CDBResName) + if resp.StatusCode != http.StatusOK { + bb, _ := ioutil.ReadAll(resp.Body) + + if resp.StatusCode == 404 { + lrpdb.Status.ConnString = "" + lrpdb.Status.Msg = lrpdb.Spec.LRPDBName + " not found" + + } else { + if flood_control == false { + lrpdb.Status.Msg = "LREST Error - HTTP Status Code:" + strconv.Itoa(resp.StatusCode) + } + } + + if flood_control == false { + log.Info("LREST Error - HTTP Status Code :"+strconv.Itoa(resp.StatusCode), "Err", string(bb)) + } + + var apiErr LRESTError + json.Unmarshal([]byte(bb), &apiErr) + if flood_control == false { + e.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", "Failed: %s", apiErr.Message) + } + fmt.Printf("\n================== APIERR ======================\n") + fmt.Printf("%+v \n", apiErr) + fmt.Printf(string(bb)) + fmt.Printf("URL=%s\n", url) + fmt.Printf("resp.StatusCode=%s\n", strconv.Itoa(resp.StatusCode)) + fmt.Printf("\n================== APIERR ======================\n") + flood_control = true + return "", errors.New("LREST Error") + } + flood_control = false + + defer resp.Body.Close() + + bodyBytes, err := 
ioutil.ReadAll(resp.Body) + if err != nil { + fmt.Print(err.Error()) + } + respData := string(bodyBytes) + fmt.Print("CALL API return msg.....:") + fmt.Println(string(bodyBytes)) + + var apiResponse restSQLCollection + json.Unmarshal([]byte(bodyBytes), &apiResponse) + fmt.Printf("===> %#v\n", apiResponse) + fmt.Printf("===> %+v\n", apiResponse) + + errFound := false + for _, sqlItem := range apiResponse.Items { + if sqlItem.ErrorDetails != "" { + log.Info("LREST Error - Oracle Error Code :" + strconv.Itoa(sqlItem.ErrorCode)) + if !errFound { + lrpdb.Status.Msg = sqlItem.ErrorDetails + } + e.Eventf(lrpdb, corev1.EventTypeWarning, "OraError", "%s", sqlItem.ErrorDetails) + errFound = true + } + } + + if errFound { + return "", errors.New("Oracle Error") + } + + return respData, nil +} + +func (r *LRPDBReconciler) GetSqlCode(rsp string, sqlcode *int) error { + log := r.Log.WithValues("GetSqlCode", "callAPI(...)") + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(rsp), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + *sqlcode = int(objmap["sqlcode"].(float64)) + log.Info("sqlcode.......:ora-" + strconv.Itoa(*sqlcode)) + if *sqlcode != 0 { + switch strconv.Itoa(*sqlcode) { + case "65019": /* already open */ + return nil + case "65020": /* already closed */ + return nil + } + err := fmt.Errorf("%v", sqlcode) + return err + } + return nil +} diff --git a/controllers/database/oraclerestdataservice_controller.go b/controllers/database/oraclerestdataservice_controller.go new file mode 100644 index 00000000..053f4a19 --- /dev/null +++ b/controllers/database/oraclerestdataservice_controller.go @@ -0,0 +1,1615 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + + "github.com/go-logr/logr" +) + +const oracleRestDataServiceFinalizer = "database.oracle.com/oraclerestdataservicefinalizer" + +// OracleRestDataServiceReconciler reconciles a OracleRestDataService object +type OracleRestDataServiceReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Config *rest.Config + Recorder record.EventRecorder +} + +//+kubebuilder:rbac:groups=database.oracle.com,resources=oraclerestdataservices,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=oraclerestdataservices/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=oraclerestdataservices/finalizers,verbs=update +//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;persistentvolumeclaims;services,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the OracleRestDataService object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile +func (r *OracleRestDataServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + oracleRestDataService := &dbapi.OracleRestDataService{} + // Always refresh status before a reconcile + defer r.Status().Update(ctx, oracleRestDataService) + + err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, oracleRestDataService) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("Resource deleted") + return requeueN, nil + } + r.Log.Error(err, err.Error()) + return requeueY, err + } + + /* Initialize Status */ + if oracleRestDataService.Status.Status == "" { + oracleRestDataService.Status.Status = dbcommons.StatusPending + oracleRestDataService.Status.ApxeUrl = dbcommons.ValueUnavailable + oracleRestDataService.Status.DatabaseApiUrl = dbcommons.ValueUnavailable + oracleRestDataService.Status.DatabaseActionsUrl = dbcommons.ValueUnavailable + r.Status().Update(ctx, oracleRestDataService) + } + oracleRestDataService.Status.LoadBalancer = strconv.FormatBool(oracleRestDataService.Spec.LoadBalancer) + oracleRestDataService.Status.Image = oracleRestDataService.Spec.Image + + // Fetch Primary Database Reference + singleInstanceDatabase := &dbapi.SingleInstanceDatabase{} + // Always refresh status before a reconcile + defer r.Status().Update(ctx, singleInstanceDatabase) + + err = r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: oracleRestDataService.Spec.DatabaseRef}, singleInstanceDatabase) + if err != nil { + if apierrors.IsNotFound(err) { + oracleRestDataService.Status.Status = 
dbcommons.StatusError + oracleRestDataService.Status.DatabaseRef = "" + eventReason := "Error" + eventMsg := "database reference " + oracleRestDataService.Spec.DatabaseRef + " not found" + r.Recorder.Eventf(oracleRestDataService, corev1.EventTypeWarning, eventReason, eventMsg) + r.Log.Info(eventMsg) + return requeueY, nil + } + r.Log.Error(err, err.Error()) + return requeueY, err + } else { + if oracleRestDataService.Status.DatabaseRef == "" { + oracleRestDataService.Status.Status = dbcommons.StatusPending + oracleRestDataService.Status.DatabaseRef = oracleRestDataService.Spec.DatabaseRef + eventReason := "Database Check" + eventMsg := "database reference " + oracleRestDataService.Spec.DatabaseRef + " found" + r.Recorder.Eventf(oracleRestDataService, corev1.EventTypeNormal, eventReason, eventMsg) + } + } + + // Manage OracleRestDataService Deletion + result := r.manageOracleRestDataServiceDeletion(req, ctx, oracleRestDataService, singleInstanceDatabase) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // First validate + result, err = r.validate(oracleRestDataService, singleInstanceDatabase, ctx) + if result.Requeue || err != nil { + r.Log.Info("Spec validation failed") + return result, nil + } + + // Create Service + result = r.createSVC(ctx, req, oracleRestDataService, singleInstanceDatabase) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // PVC Creation + result, _ = r.createPVC(ctx, req, oracleRestDataService) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // Validate if Primary Database Reference is ready + result, sidbReadyPod := r.validateSIDBReadiness(oracleRestDataService, singleInstanceDatabase, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // Create ORDS Pods + result = r.createPods(oracleRestDataService, singleInstanceDatabase, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + 
return result, nil + } + + var ordsReadyPod corev1.Pod + result, ordsReadyPod = r.checkHealthStatus(oracleRestDataService, singleInstanceDatabase, sidbReadyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + result = r.restEnableSchemas(oracleRestDataService, singleInstanceDatabase, sidbReadyPod, ordsReadyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // Configure Apex + result = r.configureApex(oracleRestDataService, singleInstanceDatabase, sidbReadyPod, ordsReadyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // Configure MongoDB + result = r.enableMongoDB(oracleRestDataService, singleInstanceDatabase, sidbReadyPod, ordsReadyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // Delete Secrets + r.deleteSecrets(oracleRestDataService, ctx, req) + + if oracleRestDataService.Status.ServiceIP == "" { + return requeueY, nil + } + + return ctrl.Result{}, nil +} + +// ############################################################################# +// +// Validate the CRD specs +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) validate(m *dbapi.OracleRestDataService, + n *dbapi.SingleInstanceDatabase, ctx context.Context) (ctrl.Result, error) { + + var err error + eventReason := "Spec Error" + var eventMsgs []string + + //First check image pull secrets + if m.Spec.Image.PullSecrets != "" { + secret := &corev1.Secret{} + err = r.Get(ctx, types.NamespacedName{Name: m.Spec.Image.PullSecrets, Namespace: m.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + // Secret not found + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, err.Error()) + r.Log.Info(err.Error()) + m.Status.Status = dbcommons.StatusError + return requeueY, err + } + r.Log.Error(err, err.Error()) + return 
requeueY, err + } + } + + // If ORDS has no peristence specified, ensure SIDB has persistence configured + if m.Spec.Persistence.Size == "" && n.Spec.Persistence.AccessMode == "" { + eventMsgs = append(eventMsgs, "cannot configure ORDS for database "+m.Spec.DatabaseRef+" that has no attached persistent volume") + } + if !m.Status.OrdsInstalled && n.Status.OrdsReference != "" { + eventMsgs = append(eventMsgs, "database "+m.Spec.DatabaseRef+" is already configured with ORDS "+n.Status.OrdsReference) + } + if m.Status.DatabaseRef != "" && m.Status.DatabaseRef != m.Spec.DatabaseRef { + eventMsgs = append(eventMsgs, "databaseRef cannot be updated") + } + if m.Status.Image.PullFrom != "" && m.Status.Image != m.Spec.Image { + eventMsgs = append(eventMsgs, "image patching is not available currently") + } + + if len(eventMsgs) > 0 { + m.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, strings.Join(eventMsgs, ",")) + r.Log.Info(strings.Join(eventMsgs, "\n")) + err = errors.New(strings.Join(eventMsgs, ",")) + return requeueY, err + } + + return requeueN, err +} + +// ##################################################################################################### +// +// Validate Readiness of the primary DB specified +// +// ##################################################################################################### +func (r *OracleRestDataServiceReconciler) validateSIDBReadiness(m *dbapi.OracleRestDataService, + n *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, corev1.Pod) { + + log := r.Log.WithValues("validateSidbReadiness", req.NamespacedName) + + // ## FETCH THE SIDB REPLICAS . 
+ sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, n.Spec.Image.Version, + n.Spec.Image.PullFrom, n.Name, n.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return requeueY, sidbReadyPod + } + + if m.Status.OrdsInstalled || m.Status.CommonUsersCreated { + return requeueN, sidbReadyPod + } + + m.Status.Status = dbcommons.StatusPending + if sidbReadyPod.Name == "" || n.Status.Status != dbcommons.StatusReady { + eventReason := "Database Check" + eventMsg := "status of database " + n.Name + " is not ready, retrying..." + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + return requeueY, sidbReadyPod + } else { + eventReason := "Database Check" + eventMsg := "status of database " + n.Name + " is ready" + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + } + + // Validate databaseRef Admin Password + adminPasswordSecret := &corev1.Secret{} + err = r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, adminPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + eventReason := "Database Password" + eventMsg := "password secret " + m.Spec.AdminPassword.SecretName + " not found, retrying..." 
+ r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + r.Log.Info(eventMsg) + return requeueY, sidbReadyPod + } + log.Error(err, err.Error()) + return requeueY, sidbReadyPod + } + adminPassword := string(adminPasswordSecret.Data[m.Spec.AdminPassword.SecretKey]) + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", fmt.Sprintf(dbcommons.ValidateAdminPassword, adminPassword), dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY, sidbReadyPod + } + if strings.Contains(out, "USER is \"SYS\"") { + log.Info("validated Admin password successfully") + } else if strings.Contains(out, "ORA-01017") { + m.Status.Status = dbcommons.StatusError + eventReason := "Database Check" + eventMsg := "login denied, invalid database admin password in secret " + m.Spec.AdminPassword.SecretName + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + log.Info(eventMsg) + return requeueY, sidbReadyPod + } else { + eventMsg := "login attempt failed for database admin password in secret " + m.Spec.AdminPassword.SecretName + log.Info(eventMsg) + return requeueY, sidbReadyPod + } + + // Create PDB , CDB Admin users and grant permissions. 
ORDS installation on CDB level + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", fmt.Sprintf(dbcommons.SetAdminUsersSQL, adminPassword), dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY, sidbReadyPod + } + if !strings.Contains(out, "ERROR") || !strings.Contains(out, "ORA-") || + strings.Contains(out, "ERROR") && strings.Contains(out, "ORA-01920") { + m.Status.CommonUsersCreated = true + } + return requeueN, sidbReadyPod +} + +// ##################################################################################################### +// +// Check ORDS Health Status +// +// ##################################################################################################### +func (r *OracleRestDataServiceReconciler) checkHealthStatus(m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase, + sidbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) (ctrl.Result, corev1.Pod) { + log := r.Log.WithValues("checkHealthStatus", req.NamespacedName) + + readyPod, _, _, _, err := dbcommons.FindPods(r, m.Spec.Image.Version, + m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return requeueY, readyPod + } + if readyPod.Name == "" { + m.Status.Status = dbcommons.StatusPending + return requeueY, readyPod + } + + // Get ORDS Status + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", + dbcommons.GetORDSStatus) + log.Info("GetORDSStatus Output") + log.Info(out) + if strings.Contains(strings.ToUpper(out), "ERROR") { + return requeueY, readyPod + } + if err != nil { + log.Info(err.Error()) + if strings.Contains(strings.ToUpper(err.Error()), "ERROR") { + return requeueY, readyPod + } + } + + m.Status.Status = dbcommons.StatusUpdating + if strings.Contains(out, "HTTP/1.1 200 OK") || 
strings.Contains(strings.ToUpper(err.Error()), "HTTP/1.1 200 OK") { + if n.Status.Status == dbcommons.StatusReady || n.Status.Status == dbcommons.StatusUpdating || n.Status.Status == dbcommons.StatusPatching { + m.Status.Status = dbcommons.StatusReady + } + if !m.Status.OrdsInstalled { + m.Status.OrdsInstalled = true + n.Status.OrdsReference = m.Name + r.Status().Update(ctx, n) + eventReason := "ORDS Installation" + eventMsg := "installation of ORDS completed" + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.OpenPDBSeed, dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + } else { + log.Info("Close PDB seed") + log.Info(out) + } + } + } + if m.Status.Status == dbcommons.StatusUpdating { + return requeueY, readyPod + } + return requeueN, readyPod +} + +// ############################################################################# +// +// Instantiate Service spec from OracleRestDataService spec +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) instantiateSVCSpec(m *dbapi.OracleRestDataService) *corev1.Service { + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name, + Namespace: m.Namespace, + Labels: map[string]string{ + "app": m.Name, + }, + Annotations: func() map[string]string { + annotations := make(map[string]string) + if len(m.Spec.ServiceAnnotations) != 0 { + for key, value := range m.Spec.ServiceAnnotations { + annotations[key] = value + } + } + return annotations + }(), + }, + Spec: corev1.ServiceSpec{ + Ports: func() []corev1.ServicePort { + ports := []corev1.ServicePort{ + { + Name: "client", + Port: 8181, + Protocol: corev1.ProtocolTCP, + }, + } + // Conditionally add MongoDB port if enabled 
+ if m.Spec.MongoDbApi { + ports = append(ports, corev1.ServicePort{ + Name: "mongodb", + Port: 27017, + Protocol: corev1.ProtocolTCP, + }) + } + return ports + }(), + Selector: map[string]string{ + "app": m.Name, + }, + Type: corev1.ServiceType(func() string { + if m.Spec.LoadBalancer { + return "LoadBalancer" + } + return "NodePort" + }()), + }, + } + // Set StandbyDatabase instance as the owner and controller + ctrl.SetControllerReference(m, svc, r.Scheme) + return svc +} + +// ############################################################################# +// +// Instantiate POD spec from OracleRestDataService spec +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) instantiatePodSpec(m *dbapi.OracleRestDataService, + n *dbapi.SingleInstanceDatabase, req ctrl.Request) *corev1.Pod { + + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name + "-" + dbcommons.GenerateRandomString(5), + Namespace: m.Namespace, + Labels: map[string]string{ + "app": m.Name, + "version": m.Spec.Image.Version, + }, + }, + Spec: corev1.PodSpec{ + Affinity: func() *corev1.Affinity { + if m.Spec.Persistence.Size == "" && n.Spec.Persistence.AccessMode == "ReadWriteOnce" { + // Only allowing pods to be scheduled on the node where SIDB pods are running + return &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{n.Name}, // Schedule on same host as DB Pod + }}, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + } + } + return nil + }(), + Volumes: []corev1.Volume{ + { + Name: "datamount", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + 
ClaimName: func() string { + if m.Spec.Persistence.AccessMode != "" { + return m.Name + } + return n.Name + }(), + ReadOnly: false, + }, + }, + }, + { + Name: "varmount", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + InitContainers: func() []corev1.Container { + initContainers := []corev1.Container{} + if m.Spec.Persistence.Size != "" && m.Spec.Persistence.SetWritePermissions != nil && *m.Spec.Persistence.SetWritePermissions { + initContainers = append(initContainers, corev1.Container{ + Name: "init-permissions", + Image: m.Spec.Image.PullFrom, + Command: []string{"/bin/sh", "-c", fmt.Sprintf("chown %d:%d /etc/ords/config/ || true", int(dbcommons.ORACLE_UID), int(dbcommons.DBA_GUID))}, + SecurityContext: &corev1.SecurityContext{ + // User ID 0 means, root user + RunAsUser: func() *int64 { i := int64(0); return &i }(), + }, + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/etc/ords/config/", + Name: "datamount", + }}, + }) + } + + initContainers = append(initContainers, corev1.Container{ + Name: "init-ords", + Image: m.Spec.Image.PullFrom, + Command: []string{"/bin/sh"}, + Args: []string{ + "-c", + fmt.Sprintf("while [ ! 
-f /opt/oracle/variables/%s ]; do sleep 0.5; done", "conn_string.txt"), + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/etc/ords/config/", + Name: "datamount", + }, + { + MountPath: "/opt/oracle/variables/", + Name: "varmount", + }, + }, + }) + return initContainers + }(), + Containers: []corev1.Container{{ + Name: m.Name, + Image: m.Spec.Image.PullFrom, + Ports: func() []corev1.ContainerPort { + ports := []corev1.ContainerPort{ + { + ContainerPort: 8181, // Default application port + }, + } + if m.Spec.MongoDbApi { + ports = append(ports, corev1.ContainerPort{ + ContainerPort: 27017, // MongoDB port + }) + } + return ports + }(), + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", dbcommons.ORDSReadinessProbe}, + }, + }, + InitialDelaySeconds: 20, + TimeoutSeconds: 20, + PeriodSeconds: func() int32 { + if m.Spec.ReadinessCheckPeriod > 0 { + return int32(m.Spec.ReadinessCheckPeriod) + } + return 60 + }(), + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/etc/ords/config/", + Name: "datamount", + }, + { + MountPath: "/opt/oracle/variables/", + Name: "varmount", + }, + }, + Env: func() []corev1.EnvVar { + // After ORDS is Installed, we DELETE THE OLD ORDS Pod and create new ones ONLY USING BELOW ENV VARIABLES. 
+ return []corev1.EnvVar{ + { + Name: "ORACLE_HOST", + Value: n.Name, + }, + { + Name: "ORACLE_PORT", + Value: "1521", + }, + { + Name: "ORACLE_SERVICE", + Value: func() string { + if m.Spec.OracleService != "" { + return m.Spec.OracleService + } + return n.Spec.Sid + }(), + }, + { + Name: "ORDS_USER", + Value: func() string { + if m.Spec.OrdsUser != "" { + return m.Spec.OrdsUser + } + return "ORDS_PUBLIC_USER" + }(), + }, + } + }(), + }}, + + TerminationGracePeriodSeconds: func() *int64 { i := int64(30); return &i }(), + + NodeSelector: func() map[string]string { + ns := make(map[string]string) + if len(m.Spec.NodeSelector) != 0 { + for key, value := range m.Spec.NodeSelector { + ns[key] = value + } + } + return ns + }(), + ServiceAccountName: func() string { + if m.Spec.ServiceAccountName != "" { + return m.Spec.ServiceAccountName + } + return "default" + }(), + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: func() *int64 { i := int64(dbcommons.ORACLE_UID); return &i }(), + RunAsGroup: func() *int64 { i := int64(dbcommons.DBA_GUID); return &i }(), + FSGroup: func() *int64 { i := int64(dbcommons.DBA_GUID); return &i }(), + }, + + ImagePullSecrets: []corev1.LocalObjectReference{ + { + Name: m.Spec.Image.PullSecrets, + }, + }, + }, + } + + // Set oracleRestDataService instance as the owner and controller + // ctrl.SetControllerReference(m, initSecret, r.Scheme) + ctrl.SetControllerReference(m, pod, r.Scheme) + return pod +} + +//############################################################################# +// Instantiate POD spec from OracleRestDataService spec +//############################################################################# + +// ############################################################################# +// +// Instantiate Persistent Volume Claim spec from SingleInstanceDatabase spec +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) instantiatePVCSpec(m 
*dbapi.OracleRestDataService) *corev1.PersistentVolumeClaim { + + pvc := &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name, + Namespace: m.Namespace, + Labels: map[string]string{ + "app": m.Name, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: func() []corev1.PersistentVolumeAccessMode { + var accessMode []corev1.PersistentVolumeAccessMode + accessMode = append(accessMode, corev1.PersistentVolumeAccessMode(m.Spec.Persistence.AccessMode)) + return accessMode + }(), + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + // Requests describes the minimum amount of compute resources required + "storage": resource.MustParse(m.Spec.Persistence.Size), + }, + }, + StorageClassName: &m.Spec.Persistence.StorageClass, + VolumeName: m.Spec.Persistence.VolumeName, + Selector: func() *metav1.LabelSelector { + if m.Spec.Persistence.StorageClass != "oci" { + return nil + } + return &metav1.LabelSelector{ + MatchLabels: func() map[string]string { + ns := make(map[string]string) + if len(m.Spec.NodeSelector) != 0 { + for key, value := range m.Spec.NodeSelector { + ns[key] = value + } + } + return ns + }(), + } + }(), + }, + } + // Set SingleInstanceDatabase instance as the owner and controller + ctrl.SetControllerReference(m, pvc, r.Scheme) + return pvc +} + +// ############################################################################# +// +// Create a Service for OracleRestDataService +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) createSVC(ctx context.Context, req ctrl.Request, + m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase) ctrl.Result { + + log := r.Log.WithValues("createSVC", req.NamespacedName) + // Check if the Service already exists, if not create a new one + svc := &corev1.Service{} + svcDeleted := false + // 
Check if the Service already exists, if not create a new one + // Get retrieves an obj ( a struct pointer ) for the given object key from the Kubernetes Cluster. + err := r.Get(ctx, types.NamespacedName{Name: m.Name, Namespace: m.Namespace}, svc) + if err == nil { + log.Info("Found Existing Service ", "Service.Name", svc.Name) + svcType := corev1.ServiceType("NodePort") + if m.Spec.LoadBalancer { + svcType = corev1.ServiceType("LoadBalancer") + } + + if svc.Spec.Type != svcType { + log.Info("Deleting SVC", " name ", svc.Name) + err = r.Delete(ctx, svc) + if err != nil { + r.Log.Error(err, "Failed to delete svc", " Name", svc.Name) + return requeueN + } + svcDeleted = true + } + } + + if svcDeleted || (err != nil && apierrors.IsNotFound(err)) { + // Define a new Service + svc = r.instantiateSVCSpec(m) + log.Info("Creating a new Service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) + err = r.Create(ctx, svc) + if err != nil { + log.Error(err, "Failed to create new service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) + return requeueY + } else { + eventReason := "Service creation" + eventMsg := "successfully created service type " + string(svc.Spec.Type) + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + log.Info(eventMsg) + } + + } else if err != nil { + log.Error(err, "Failed to get Service") + return requeueY + } + + m.Status.ServiceIP = "" + if m.Spec.LoadBalancer { + if len(svc.Status.LoadBalancer.Ingress) > 0 { + // 'lbAddress' will contain the Fully Qualified Hostname of the LB. 
If the hostname is not available it will contain the IP address of the LB + lbAddress := svc.Status.LoadBalancer.Ingress[0].Hostname + if lbAddress == "" { + lbAddress = svc.Status.LoadBalancer.Ingress[0].IP + } + m.Status.DatabaseApiUrl = "http://" + lbAddress + ":" + + fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/" + "{schema-name}" + "/_/db-api/stable/" + m.Status.ServiceIP = lbAddress + m.Status.DatabaseActionsUrl = "http://" + lbAddress + ":" + + fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/sql-developer" + if m.Status.ApexConfigured { + m.Status.ApxeUrl = "http://" + lbAddress + ":" + + fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/apex" + } + if m.Status.MongoDbApi && len(svc.Spec.Ports) > 1 { + m.Status.MongoDbApiAccessUrl = "mongodb://[{user}:{password}@]" + lbAddress + ":" + + fmt.Sprint(svc.Spec.Ports[1].Port) + "/{user}?" + + "authMechanism=PLAIN&authSource=$external&ssl=true&retryWrites=false&loadBalanced=true" + } else { + m.Status.MongoDbApiAccessUrl = "" + } + } + return requeueN + } + nodeip := dbcommons.GetNodeIp(r, ctx, req) + if nodeip != "" { + m.Status.ServiceIP = nodeip + m.Status.DatabaseApiUrl = "http://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + + "/ords/" + "{schema-name}" + "/_/db-api/stable/" + m.Status.DatabaseActionsUrl = "http://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + + "/ords/sql-developer" + if m.Status.ApexConfigured { + m.Status.ApxeUrl = "http://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/ords/apex" + } + if m.Status.MongoDbApi && len(svc.Spec.Ports) > 1 { + m.Status.MongoDbApiAccessUrl = "mongodb://[{user}:{password}@]" + nodeip + ":" + + fmt.Sprint(svc.Spec.Ports[1].NodePort) + "/{user}?" 
+ + "authMechanism=PLAIN&authSource=$external&ssl=true&retryWrites=false&loadBalanced=true" + } else { + m.Status.MongoDbApiAccessUrl = "" + } + } + return requeueN +} + +// ############################################################################# +// +// Stake a claim for Persistent Volume +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) createPVC(ctx context.Context, req ctrl.Request, + m *dbapi.OracleRestDataService) (ctrl.Result, error) { + + // PV is shared for ORDS and SIDB + if m.Spec.Persistence.AccessMode == "" { + return requeueN, nil + } + log := r.Log.WithValues("createPVC", req.NamespacedName) + + pvc := &corev1.PersistentVolumeClaim{} + err := r.Get(ctx, types.NamespacedName{Name: m.Name, Namespace: m.Namespace}, pvc) + if err != nil && apierrors.IsNotFound(err) { + // Define a new PVC + pvc = r.instantiatePVCSpec(m) + log.Info("Creating a new PVC", "PVC.Namespace", pvc.Namespace, "PVC.Name", pvc.Name) + err = r.Create(ctx, pvc) + if err != nil { + log.Error(err, "Failed to create new PVC", "PVC.Namespace", pvc.Namespace, "PVC.Name", pvc.Name) + return requeueY, err + } + return requeueN, nil + } else if err != nil { + log.Error(err, "Failed to get PVC") + return requeueY, err + } else { + log.Info("PVC already exists") + } + + return requeueN, nil +} + +// ############################################################################# +// +// Function for creating connection sting file +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) createConnectionString(m *dbapi.OracleRestDataService, + n *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + // Listing all the pods + readyPod, _, availableFinal, _, err := dbcommons.FindPods(r, m.Spec.Image.Version, + m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + + if err != nil { + r.Log.Error(err, 
err.Error()) + return requeueY, nil + } + if readyPod.Name != "" { + return requeueN, nil + } + + if len(availableFinal) == 0 { + r.Log.Info("Pods are being created, currently no pods available") + return requeueY, nil + } + + // Iterate through the availableFinal (list of pods) to find out the pod whose status is updated about the init containers + // If no required pod found then requeue the reconcile request + var pod corev1.Pod + var podFound bool + for _, pod = range availableFinal { + // Check if pod status container is updated about init containers + if len(pod.Status.InitContainerStatuses) > 0 { + podFound = true + break + } + } + if !podFound { + r.Log.Info("No pod has its status updated about init containers. Requeueing...") + return requeueY, nil + } + + lastInitContIndex := len(pod.Status.InitContainerStatuses) - 1 + + // If InitContainerStatuses[].Ready is true, it means that the init container is successful + if pod.Status.InitContainerStatuses[lastInitContIndex].Ready { + // Init container named "init-ords" has completed it's execution, hence return and don't requeue + return requeueN, nil + } + + if pod.Status.InitContainerStatuses[lastInitContIndex].State.Running == nil { + // Init container named "init-ords" is not running, so waiting for it to come in running state requeueing the reconcile request + r.Log.Info("Waiting for init-ords to come in running state...") + return requeueY, nil + } + + r.Log.Info("Creating Connection String file...") + + // Querying the secret + r.Log.Info("Querying the database secret ...") + secret := &corev1.Secret{} + err = r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("Secret not found") + m.Status.Status = dbcommons.StatusError + r.Status().Update(ctx, m) + return requeueY, nil + } + r.Log.Error(err, "Unable to get the secret. 
Requeueing..") + return requeueY, nil + } + + // Execing into the pods and creating the Connection String + adminPassword := string(secret.Data[m.Spec.AdminPassword.SecretKey]) + + _, err = dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "init-ords", + ctx, req, true, "bash", "-c", + fmt.Sprintf("mkdir -p /opt/oracle/variables && echo %[1]s > /opt/oracle/variables/%[2]s", + fmt.Sprintf(dbcommons.DbConnectString, adminPassword, n.Name, n.Status.Pdbname), + "conn_string.txt")) + + if err != nil { + r.Log.Error(err, err.Error()) + r.Log.Error(err, "Failed to create connection string in new "+m.Name+" POD", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) + return requeueY, nil + } + r.Log.Info("Succesfully Created connection string in new "+m.Name+" POD", "POD.NAME : ", pod.Name) + + return requeueN, nil +} + +// ############################################################################# +// +// Create the requested POD replicas +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) createPods(m *dbapi.OracleRestDataService, + n *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) ctrl.Result { + + log := r.Log.WithValues("createPods", req.NamespacedName) + + readyPod, replicasFound, available, podsMarkedToBeDeleted, err := dbcommons.FindPods(r, m.Spec.Image.Version, + m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } + + // Recreate new pods only after earlier pods are terminated completely + for i := 0; i < len(podsMarkedToBeDeleted); i++ { + r.Log.Info("Force deleting pod ", "name", podsMarkedToBeDeleted[i].Name, "phase", podsMarkedToBeDeleted[i].Status.Phase) + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + r.Delete(ctx, &podsMarkedToBeDeleted[i], &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + } + + 
log.Info(m.Name, " pods other than one of Ready Pods : ", dbcommons.GetPodNames(available)) + log.Info(m.Name, " Ready Pod : ", readyPod.Name) + + replicasReq := m.Spec.Replicas + if replicasFound == 0 { + m.Status.Status = dbcommons.StatusPending + } + + if replicasFound == replicasReq { + log.Info("No of " + m.Name + " replicas Found are same as Required") + } else if replicasFound < replicasReq { + // Create New Pods , Name of Pods are generated Randomly + for i := replicasFound; i < replicasReq; i++ { + // Obtain admin password of the referred database + adminPasswordSecret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: n.Spec.AdminPassword.SecretName, Namespace: n.Namespace}, adminPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + m.Status.Status = dbcommons.StatusError + eventReason := "Database Password" + eventMsg := "password secret " + m.Spec.AdminPassword.SecretName + " not found, retrying..." + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + r.Log.Info(eventMsg) + return requeueY + } + log.Error(err, err.Error()) + return requeueY + } + + pod := r.instantiatePodSpec(m, n, req) + + log.Info("Creating a new "+m.Name+" POD", "POD.Namespace", pod.Namespace, "POD.Name", pod.Name) + err = r.Create(ctx, pod) + if err != nil { + log.Error(err, "Failed to create new "+m.Name+" POD", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) + return requeueY + } + log.Info("Succesfully Created new "+m.Name+" POD", "POD.NAME : ", pod.Name) + } + } else { + // Delete extra pods + noDeleted := 0 + if readyPod.Name != "" { + available = append(available, readyPod) + } + for _, pod := range available { + if readyPod.Name == pod.Name { + continue + } + if replicasReq == (len(available) - noDeleted) { + break + } + r.Log.Info("Deleting Pod : ", "POD.NAME", pod.Name) + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + err := r.Delete(ctx, &pod, &client.DeleteOptions{ + 
GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + noDeleted += 1 + if err != nil { + r.Log.Error(err, "Failed to delete existing POD", "POD.Name", pod.Name) + // Don't requeue + } + } + } + + // Creating conn string in pods + result, err := r.createConnectionString(m, n, ctx, req) + + if err != nil { + return requeueY + } + if result.Requeue { + log.Info("Requeued at connection string creation") + return requeueY + } + + m.Status.Replicas = m.Spec.Replicas + + return requeueN +} + +// ############################################################################# +// +// Manage Finalizer to cleanup before deletion of OracleRestDataService +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) manageOracleRestDataServiceDeletion(req ctrl.Request, ctx context.Context, + m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase) ctrl.Result { + log := r.Log.WithValues("manageOracleRestDataServiceDeletion", req.NamespacedName) + + // Check if the OracleRestDataService instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. + isOracleRestDataServiceMarkedToBeDeleted := m.GetDeletionTimestamp() != nil + if isOracleRestDataServiceMarkedToBeDeleted { + if controllerutil.ContainsFinalizer(m, oracleRestDataServiceFinalizer) { + // Run finalization logic for oracleRestDataServiceFinalizer. If the + // finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. 
+ if err := r.cleanupOracleRestDataService(req, ctx, m, n); err != nil { + log.Error(err, err.Error()) + return requeueY + } + + n.Status.OrdsReference = "" + // Make sure n.Status.OrdsInstalled is set to false or else it blocks .spec.databaseRef deletion + for i := 0; i < 10; i++ { + log.Info("Clearing the OrdsReference from DB", "name", n.Name) + err := r.Status().Update(ctx, n) + if err != nil { + log.Error(err, err.Error()) + time.Sleep(1 * time.Second) + continue + } + break + } + + // Remove oracleRestDataServiceFinalizer. Once all finalizers have been + // removed, the object will be deleted. + controllerutil.RemoveFinalizer(m, oracleRestDataServiceFinalizer) + err := r.Update(ctx, m) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } + } + return requeueY + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(m, oracleRestDataServiceFinalizer) { + controllerutil.AddFinalizer(m, oracleRestDataServiceFinalizer) + err := r.Update(ctx, m) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } + } + return requeueN +} + +// ############################################################################# +// +// Finalization logic for OracleRestDataServiceFinalizer +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) cleanupOracleRestDataService(req ctrl.Request, ctx context.Context, + m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase) error { + log := r.Log.WithValues("cleanupOracleRestDataService", req.NamespacedName) + + if m.Status.OrdsInstalled { + // ## FETCH THE SIDB REPLICAS . 
+ sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, n.Spec.Image.Version, + n.Spec.Image.PullFrom, n.Name, n.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + if sidbReadyPod.Name == "" { + eventReason := "ORDS Uninstallation" + eventMsg := "skipping ORDS uninstallation as no ready pod for " + n.Name + " is available" + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + return nil + } + + // Get Session id , serial# for ORDS_PUBLIC_USER to kill the sessions + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s ", dbcommons.GetSessionInfoSQL, dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("GetSessionInfoSQL Output : " + out) + + sessionInfos, _ := dbcommons.StringToLines(out) + killSessions := "" + for _, sessionInfo := range sessionInfos { + if !strings.Contains(sessionInfo, ",") { + // May be a column name or (-----) + continue + } + killSessions += "\n" + fmt.Sprintf(dbcommons.KillSessionSQL, sessionInfo) + } + + //kill all the sessions with given sid,serial# + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s ", killSessions, dbcommons.SQLPlusCLI)) + + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("KillSession Output : " + out) + + // Fetch admin Password of database to uninstall ORDS + adminPasswordSecret := &corev1.Secret{} + adminPasswordSecretFound := false + for i := 0; i < 5; i++ { + err := r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: n.Namespace}, adminPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + m.Status.Status = dbcommons.StatusError + eventReason := "Error" + eventMsg := "database admin password secret " + m.Spec.AdminPassword.SecretName + " 
required for ORDS uninstall not found, retrying..." + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + r.Log.Info(eventMsg) + if i < 4 { + time.Sleep(15 * time.Second) + continue + } + } else { + log.Error(err, err.Error()) + } + } else { + adminPasswordSecretFound = true + break + } + } + // Find ORDS ready pod + readyPod, _, _, _, err := dbcommons.FindPods(r, m.Spec.Image.Version, + m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + if adminPasswordSecretFound && readyPod.Name != "" { + adminPassword := string(adminPasswordSecret.Data[m.Spec.AdminPassword.SecretKey]) + if n.Status.ApexInstalled { + //Uninstall Apex + eventReason := "Apex Uninstallation" + eventMsg := "Uninstalling Apex..." + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + log.Info(eventMsg) + out, err = dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.UninstallApex, adminPassword, n.Status.Pdbname)) + if err != nil { + log.Info(err.Error()) + } + n.Status.ApexInstalled = false // To reinstall Apex when ORDS is reinstalled + log.Info("Apex uninstall output: " + out) + } + //Uninstall ORDS + eventReason := "ORDS Uninstallation" + eventMsg := "Uninstalling ORDS..." 
+ r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + log.Info(eventMsg) + uninstallORDS := fmt.Sprintf(dbcommons.UninstallORDSCMD, adminPassword) + out, err = dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, true, "bash", "-c", + uninstallORDS) + log.Info("ORDS uninstall output: " + out) + if strings.Contains(strings.ToUpper(out), "ERROR") { + return errors.New(out) + } + if err != nil { + log.Info(err.Error()) + } + } + + // Drop Admin Users + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s ", dbcommons.DropAdminUsersSQL, dbcommons.SQLPlusCLI)) + if err != nil { + log.Info(err.Error()) + } + log.Info("Drop admin users: " + out) + + //Delete ORDS pod + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + r.Delete(ctx, &readyPod, &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + + //Delete Database Admin Password Secret + if !*m.Spec.AdminPassword.KeepSecret { + err = r.Delete(ctx, adminPasswordSecret, &client.DeleteOptions{}) + if err == nil { + r.Log.Info("Deleted Admin Password Secret :" + adminPasswordSecret.Name) + } + } + } + + // Cleanup steps that the operator needs to do before the CR can be deleted. 
+ log.Info("Successfully cleaned up OracleRestDataService ") + return nil +} + +// ############################################################################# +// +// Configure APEX +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) configureApex(m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase, + sidbReadyPod corev1.Pod, ordsReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) ctrl.Result { + log := r.Log.WithValues("verifyApex", req.NamespacedName) + + if m.Status.ApexConfigured { + return requeueN + } + + // Obtain admin password of the referred database + + adminPasswordSecret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, adminPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + m.Status.Status = dbcommons.StatusError + eventReason := "Database Password" + eventMsg := "password secret " + m.Spec.AdminPassword.SecretName + " not found, retrying..." + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + r.Log.Info(eventMsg) + return requeueY + } + log.Error(err, err.Error()) + return requeueY + } + sidbPassword := string(adminPasswordSecret.Data[m.Spec.AdminPassword.SecretKey]) + + // Checking if Apex is installed successfully or not + out, err := dbcommons.ExecCommand(r, r.Config, ordsReadyPod.Name, ordsReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.IsApexInstalled, sidbPassword, n.Status.Pdbname)) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } + log.Info("Is Apex installed: \n" + out) + + apexInstalled := "APEXVERSION:" + if !strings.Contains(out, apexInstalled) { + eventReason := "Apex Verification" + eventMsg := "Unable to determine Apex version, retrying..." 
+ r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + return requeueY + } + + m.Status.Status = dbcommons.StatusReady + eventReason := "Apex Verification" + outArr := strings.Split(out, apexInstalled) + eventMsg := "Verification of Apex " + strings.TrimSpace(outArr[len(outArr)-1]) + " completed" + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + n.Status.ApexInstalled = true + m.Status.ApexConfigured = true + r.Status().Update(ctx, n) + r.Status().Update(ctx, m) + + return requeueN +} + +// ############################################################################# +// +// Delete Secrets +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) deleteSecrets(m *dbapi.OracleRestDataService, ctx context.Context, req ctrl.Request) { + log := r.Log.WithValues("deleteSecrets", req.NamespacedName) + + if !*m.Spec.AdminPassword.KeepSecret { + // Fetch adminPassword Secret + adminPasswordSecret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, adminPasswordSecret) + if err == nil { + //Delete Database Admin Password Secret . + err := r.Delete(ctx, adminPasswordSecret, &client.DeleteOptions{}) + if err == nil { + log.Info("Database admin password secret deleted : " + adminPasswordSecret.Name) + } + } + } + + if !*m.Spec.OrdsPassword.KeepSecret { + // Fetch ordsPassword Secret + ordsPasswordSecret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: m.Spec.OrdsPassword.SecretName, Namespace: m.Namespace}, ordsPasswordSecret) + if err == nil { + //Delete ORDS Password Secret . 
+ err := r.Delete(ctx, ordsPasswordSecret, &client.DeleteOptions{}) + if err == nil { + log.Info("ORDS password secret deleted : " + ordsPasswordSecret.Name) + } + } + } +} + +// ############################################################################# +// +// Enable MongoDB API Support +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) enableMongoDB(m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase, + sidbReadyPod corev1.Pod, ordsReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) ctrl.Result { + log := r.Log.WithValues("enableMongoDB", req.NamespacedName) + + if (m.Spec.MongoDbApi && !m.Status.MongoDbApi) || // setting MongoDbApi to true + (!m.Spec.MongoDbApi && m.Status.MongoDbApi) { // setting MongoDbApi to false + m.Status.Status = dbcommons.StatusUpdating + + out, err := dbcommons.ExecCommand(r, r.Config, ordsReadyPod.Name, ordsReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.ConfigMongoDb, strconv.FormatBool(m.Spec.MongoDbApi))) + log.Info("configMongoDB Output: \n" + out) + + if strings.Contains(strings.ToUpper(out), "ERROR") { + return requeueY + } + if err != nil { + log.Info(err.Error()) + if strings.Contains(strings.ToUpper(err.Error()), "ERROR") { + return requeueY + } + } + + m.Status.MongoDbApi = m.Spec.MongoDbApi + m.Status.Status = dbcommons.StatusReady + r.Status().Update(ctx, m) + eventReason := "MongoDB-API Config" + eventMsg := "configuration of MongoDb API completed!" 
+ r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + log.Info(eventMsg) + + // ORDS service is resatrted + r.Log.Info("Restarting ORDS Service : " + m.Name) + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: m.Name, Namespace: m.Namespace}, + } + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + err = r.Delete(ctx, svc, &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + if err != nil { + r.Log.Error(err, "Failed to delete ORDS service", "Service Name", m.Name) + return requeueY + } + + // ORDS needs to be restarted to configure MongoDB API + r.Log.Info("Restarting ORDS Pod after configuring MongoDb API : " + ordsReadyPod.Name) + err = r.Delete(ctx, &ordsReadyPod, &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + if err != nil { + r.Log.Error(err, err.Error()) + } + return requeueY + + } else { + log.Info("MongoDB Already Configured") + } + + return requeueN +} + +// ############################################################################# +// +// Rest Enable/Disable Schemas +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) restEnableSchemas(m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase, + sidbReadyPod corev1.Pod, ordsReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) ctrl.Result { + + log := r.Log.WithValues("restEnableSchemas", req.NamespacedName) + + if sidbReadyPod.Name == "" || n.Status.Status != dbcommons.StatusReady { + eventReason := "Database Check" + eventMsg := "status of database " + n.Name + " is not ready, retrying..." 
+ r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + m.Status.Status = dbcommons.StatusNotReady + return requeueY + } + + // Get available PDBs + availablePDBS, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", + ctx, req, true, "bash", "-c", fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.GetPdbsSQL, dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } else { + log.Info("PDBs found:") + log.Info(availablePDBS) + } + + restartORDS := false + + for i := 0; i < len(m.Spec.RestEnableSchemas); i++ { + + pdbName := m.Spec.RestEnableSchemas[i].PdbName + if pdbName == "" { + pdbName = n.Spec.Pdbname + } + + // If the PDB mentioned in yaml doesnt contain in the database , continue + if !strings.Contains(strings.ToUpper(availablePDBS), strings.ToUpper(pdbName)) { + eventReason := "PDB Check" + eventMsg := "PDB " + pdbName + " not found for specified schema " + m.Spec.RestEnableSchemas[i].SchemaName + log.Info(eventMsg) + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + continue + } + + getOrdsSchemaStatus := fmt.Sprintf(dbcommons.GetUserORDSSchemaStatusSQL, m.Spec.RestEnableSchemas[i].SchemaName, pdbName) + + // Get ORDS Schema status for PDB + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", getOrdsSchemaStatus, dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } + + // if ORDS already enabled for given PDB + if strings.Contains(out, "STATUS:ENABLED") { + if m.Spec.RestEnableSchemas[i].Enable { + log.Info("Schema already enabled", "schema", m.Spec.RestEnableSchemas[i].SchemaName) + continue + } + } else if strings.Contains(out, "STATUS:DISABLED") { + if !m.Spec.RestEnableSchemas[i].Enable { + log.Info("Schema already disabled", "schema", m.Spec.RestEnableSchemas[i].SchemaName) + continue + } + } else if 
m.Spec.RestEnableSchemas[i].Enable { + OrdsPasswordSecret := &corev1.Secret{} + // Fetch the secret to get password for database user . Secret has to be created in the same namespace of OracleRestDataService + err = r.Get(ctx, types.NamespacedName{Name: m.Spec.OrdsPassword.SecretName, Namespace: m.Namespace}, OrdsPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + eventReason := "No Secret" + eventMsg := "secret " + m.Spec.OrdsPassword.SecretName + " Not Found" + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + r.Log.Info(eventMsg) + return requeueY + } + log.Error(err, err.Error()) + return requeueY + } + password := string(OrdsPasswordSecret.Data[m.Spec.OrdsPassword.SecretKey]) + // Create users,schemas and grant enableORDS for PDB + createSchemaSQL := fmt.Sprintf(dbcommons.CreateORDSSchemaSQL, m.Spec.RestEnableSchemas[i].SchemaName, password, pdbName) + log.Info("Creating schema", "schema", m.Spec.RestEnableSchemas[i].SchemaName) + _, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", createSchemaSQL, dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } + } else { + log.Info("Noop, ignoring", "schema", m.Spec.RestEnableSchemas[i].SchemaName) + continue + } + urlMappingPattern := "" + if m.Spec.RestEnableSchemas[i].UrlMapping == "" { + urlMappingPattern = strings.ToLower(m.Spec.RestEnableSchemas[i].SchemaName) + } else { + urlMappingPattern = strings.ToLower(m.Spec.RestEnableSchemas[i].UrlMapping) + } + enableORDSSchema := fmt.Sprintf(dbcommons.EnableORDSSchemaSQL, m.Spec.RestEnableSchemas[i].SchemaName, + strconv.FormatBool(m.Spec.RestEnableSchemas[i].Enable), urlMappingPattern, pdbName) + + // EnableORDS for Schema + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", 
enableORDSSchema, dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY + } + log.Info(out) + if m.Spec.RestEnableSchemas[i].Enable { + log.Info("REST Enabled", "schema", m.Spec.RestEnableSchemas[i].SchemaName) + } else { + log.Info("REST Disabled", "schema", m.Spec.RestEnableSchemas[i].SchemaName) + restartORDS = true + } + } + + if restartORDS { + r.Log.Info("Restarting ORDS Pod " + ordsReadyPod.Name + " to clear disabled schemas cache") + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + err = r.Delete(ctx, &ordsReadyPod, &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + if err != nil { + r.Log.Error(err, err.Error()) + } + return requeueY + } + return requeueN +} + +// ############################################################################# +// +// SetupWithManager sets up the controller with the Manager. +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.OracleRestDataService{}). + Owns(&corev1.Pod{}). //Watch for deleted pods of OracleRestDataService Owner + WithEventFilter(dbcommons.ResourceEventHandler()). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). //ReconcileHandler is never invoked concurrently with the same object. + Complete(r) +} diff --git a/controllers/database/ordssrvs_controller.go b/controllers/database/ordssrvs_controller.go new file mode 100644 index 00000000..14c7f46e --- /dev/null +++ b/controllers/database/ordssrvs_controller.go @@ -0,0 +1,1116 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + // dbapi "example.com/oracle-ords-operator/api/v1" + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" +) + +// Definitions of Standards +const ( + ordsSABase = "/opt/oracle/sa" + serviceHTTPPortName = "svc-http-port" + serviceHTTPSPortName = "svc-https-port" + serviceMongoPortName = "svc-mongo-port" + targetHTTPPortName = "pod-http-port" + targetHTTPSPortName = "pod-https-port" + targetMongoPortName = "pod-mongo-port" + globalConfigMapName = "settings-global" + poolConfigPreName = "settings-" // Append PoolName + controllerLabelKey = "oracle.com/ords-operator-filter" + controllerLabelVal = "oracle-database-operator" + specHashLabel = "oracle.com/ords-operator-spec-hash" +) + +// Definitions to manage status conditions +const ( + // typeAvailableORDS represents the status of the Workload reconciliation + typeAvailableORDS = "Available" + // typeUnsyncedORDS represents the status used when the configuration has changed but the Workload has not been restarted. 
+ typeUnsyncedORDS = "Unsynced" +) + +// Trigger a restart of Pods on Config Changes +var RestartPods bool = false + +// OrdsSrvsReconciler reconciles a OrdsSrvs object +type OrdsSrvsReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder +} + +//+kubebuilder:rbac:groups=database.oracle.com,resources=ordssrvs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=ordssrvs/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=ordssrvs/finalizers,verbs=update +//+kubebuilder:rbac:groups=core,resources=events,verbs=create;patch +//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=configmaps/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=secrets/status,verbs=get +//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=services/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=deployments/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=apps,resources=daemonsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=daemonsets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=statefulsets/status,verbs=get;update;patch + +// SetupWithManager sets up the controller with the Manager. +func (r *OrdsSrvsReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.OrdsSrvs{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). 
+ Owns(&appsv1.Deployment{}). + Owns(&appsv1.StatefulSet{}). + Owns(&appsv1.DaemonSet{}). + Owns(&corev1.Service{}). + Complete(r) +} + +func (r *OrdsSrvsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logr := log.FromContext(ctx) + ords := &dbapi.OrdsSrvs{} + + // Check if resource exists or was deleted + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + if apierrors.IsNotFound(err) { + logr.Info("Resource deleted") + return ctrl.Result{}, nil + } + logr.Error(err, "Error retrieving resource") + return ctrl.Result{Requeue: true, RequeueAfter: time.Minute}, err + } + + // Set the status as Unknown when no status are available + if ords.Status.Conditions == nil || len(ords.Status.Conditions) == 0 { + condition := metav1.Condition{Type: typeUnsyncedORDS, Status: metav1.ConditionUnknown, Reason: "Reconciling", Message: "Starting reconciliation"} + if err := r.SetStatus(ctx, req, ords, condition); err != nil { + return ctrl.Result{}, err + } + } + + // ConfigMap - Init Script + if err := r.ConfigMapReconcile(ctx, ords, ords.Name+"-"+"init-script", 0); err != nil { + logr.Error(err, "Error in ConfigMapReconcile (init-script)") + return ctrl.Result{}, err + } + + // ConfigMap - Global Settings + if err := r.ConfigMapReconcile(ctx, ords, ords.Name+"-"+globalConfigMapName, 0); err != nil { + logr.Error(err, "Error in ConfigMapReconcile (Global)") + return ctrl.Result{}, err + } + + // ConfigMap - Pool Settings + definedPools := make(map[string]bool) + for i := 0; i < len(ords.Spec.PoolSettings); i++ { + poolName := strings.ToLower(ords.Spec.PoolSettings[i].PoolName) + poolConfigMapName := ords.Name + "-" + poolConfigPreName + poolName + if definedPools[poolConfigMapName] { + return ctrl.Result{}, errors.New("poolName: " + poolName + " is not unique") + } + definedPools[poolConfigMapName] = true + if err := r.ConfigMapReconcile(ctx, ords, poolConfigMapName, i); err != nil { + logr.Error(err, "Error in ConfigMapReconcile 
(Pools)") + return ctrl.Result{}, err + } + } + if err := r.ConfigMapDelete(ctx, req, ords, definedPools); err != nil { + logr.Error(err, "Error in ConfigMapDelete (Pools)") + return ctrl.Result{}, err + } + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return ctrl.Result{}, err + } + + // // Secrets - Pool Settings + // for i := 0; i < len(ords.Spec.PoolSettings); i++ { + // if err := r.SecretsReconcile(ctx, ords, i); err != nil { + // logr.Error(err, "Error in SecretsReconcile (Pools)") + // return ctrl.Result{}, err + // } + // } + + // Set the Type as Unsynced when a pod restart is required + if RestartPods { + condition := metav1.Condition{Type: typeUnsyncedORDS, Status: metav1.ConditionTrue, Reason: "Unsynced", Message: "Configurations have changed"} + if err := r.SetStatus(ctx, req, ords, condition); err != nil { + return ctrl.Result{}, err + } + } + + // Workloads + if err := r.WorkloadReconcile(ctx, req, ords, ords.Spec.WorkloadType); err != nil { + logr.Error(err, "Error in WorkloadReconcile") + return ctrl.Result{}, err + } + if err := r.WorkloadDelete(ctx, req, ords, ords.Spec.WorkloadType); err != nil { + logr.Error(err, "Error in WorkloadDelete") + return ctrl.Result{}, err + } + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return ctrl.Result{}, err + } + + // Service + if err := r.ServiceReconcile(ctx, ords); err != nil { + logr.Error(err, "Error in ServiceReconcile") + return ctrl.Result{}, err + } + + // Set the Type as Available when a pod restart is not required + if !RestartPods { + condition := metav1.Condition{Type: typeAvailableORDS, Status: metav1.ConditionTrue, Reason: "Available", Message: "Workload in Sync"} + if err := r.SetStatus(ctx, req, ords, condition); err != nil { + return ctrl.Result{}, err + } + } + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return 
ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +/************************************************ + * Status + *************************************************/ +func (r *OrdsSrvsReconciler) SetStatus(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, statusCondition metav1.Condition) error { + logr := log.FromContext(ctx).WithName("SetStatus") + + // Fetch before Status Update + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return err + } + var readyWorkload int32 + var desiredWorkload int32 + switch ords.Spec.WorkloadType { + //nolint:goconst + case "StatefulSet": + workload := &appsv1.StatefulSet{} + if err := r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, workload); err != nil { + logr.Info("StatefulSet not ready") + } + readyWorkload = workload.Status.ReadyReplicas + desiredWorkload = workload.Status.Replicas + //nolint:goconst + case "DaemonSet": + workload := &appsv1.DaemonSet{} + if err := r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, workload); err != nil { + logr.Info("DaemonSet not ready") + } + readyWorkload = workload.Status.NumberReady + desiredWorkload = workload.Status.DesiredNumberScheduled + default: + workload := &appsv1.Deployment{} + if err := r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, workload); err != nil { + logr.Info("Deployment not ready") + } + readyWorkload = workload.Status.ReadyReplicas + desiredWorkload = workload.Status.Replicas + } + + var workloadStatus string + if readyWorkload == 0 { + workloadStatus = "Preparing" + } else if readyWorkload == desiredWorkload { + workloadStatus = "Healthy" + ords.Status.OrdsInstalled = true + } else { + workloadStatus = "Progressing" + } + + mongoPort := int32(0) + if ords.Spec.GlobalSettings.MongoEnabled { + mongoPort = *ords.Spec.GlobalSettings.MongoPort + } + + meta.SetStatusCondition(&ords.Status.Conditions, 
statusCondition) + ords.Status.Status = workloadStatus + ords.Status.WorkloadType = ords.Spec.WorkloadType + ords.Status.ORDSVersion = strings.Split(ords.Spec.Image, ":")[1] + ords.Status.HTTPPort = ords.Spec.GlobalSettings.StandaloneHTTPPort + ords.Status.HTTPSPort = ords.Spec.GlobalSettings.StandaloneHTTPSPort + ords.Status.MongoPort = mongoPort + ords.Status.RestartRequired = RestartPods + if err := r.Status().Update(ctx, ords); err != nil { + logr.Error(err, "Failed to update Status") + return err + } + return nil +} + +/************************************************ + * ConfigMaps + *************************************************/ +func (r *OrdsSrvsReconciler) ConfigMapReconcile(ctx context.Context, ords *dbapi.OrdsSrvs, configMapName string, poolIndex int) (err error) { + logr := log.FromContext(ctx).WithName("ConfigMapReconcile") + desiredConfigMap := r.ConfigMapDefine(ctx, ords, configMapName, poolIndex) + + // Create if ConfigMap not found + definedConfigMap := &corev1.ConfigMap{} + if err = r.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: ords.Namespace}, definedConfigMap); err != nil { + if apierrors.IsNotFound(err) { + if err := r.Create(ctx, desiredConfigMap); err != nil { + return err + } + logr.Info("Created: " + configMapName) + RestartPods = true + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Create", "ConfigMap %s Created", configMapName) + // Requery for comparison + if err := r.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: ords.Namespace}, definedConfigMap); err != nil { + return err + } + } else { + return err + } + } + if !equality.Semantic.DeepEqual(definedConfigMap.Data, desiredConfigMap.Data) { + if err = r.Update(ctx, desiredConfigMap); err != nil { + return err + } + logr.Info("Updated: " + configMapName) + RestartPods = true + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "ConfigMap %s Updated", configMapName) + } + return nil +} + +/************************************************ + * 
Secrets - TODO (Watch and set RestartPods) + *************************************************/ +// func (r *OrdsSrvsReconciler) SecretsReconcile(ctx context.Context, ords *dbapi.OrdsSrvs, poolIndex int) (err error) { +// logr := log.FromContext(ctx).WithName("SecretsReconcile") +// definedSecret := &corev1.Secret{} + +// // Want to set ownership on the Secret for watching; also detects if TNS_ADMIN is needed. +// if ords.Spec.PoolSettings[i].DBSecret != nil { +// } +// if ords.Spec.PoolSettings[i].DBAdminUserSecret != nil { +// } +// if ords.Spec.PoolSettings[i].DBCDBAdminUserSecret != nil { +// } +// if ords.Spec.PoolSettings[i].TNSAdminSecret != nil { +// } +// if ords.Spec.PoolSettings[i].DBWalletSecret != nil { +// } + +// if ords.Spec.PoolSettings[i].TNSAdminSecret != nil { +// tnsSecretName := ords.Spec.PoolSettings[i].TNSAdminSecret.SecretName +// definedSecret := &corev1.Secret{} +// if err = r.Get(ctx, types.NamespacedName{Name: tnsSecretName, Namespace: ords.Namespace}, definedSecret); err != nil { +// ojdbcPropertiesData, ok := secret.Data["ojdbc.properties"] +// if ok { +// if err = r.Update(ctx, desiredConfigMap); err != nil { +// return err +// } +// } +// } +// } + +// return nil +// } + +/************************************************ + * Workloads + *************************************************/ +func (r *OrdsSrvsReconciler) WorkloadReconcile(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, kind string) (err error) { + logr := log.FromContext(ctx).WithName("WorkloadReconcile") + objectMeta := objectMetaDefine(ords, ords.Name) + selector := selectorDefine(ords) + template := r.podTemplateSpecDefine(ords, ctx, req) + + var desiredWorkload client.Object + var desiredSpecHash string + var definedSpecHash string + + switch kind { + case "StatefulSet": + desiredWorkload = &appsv1.StatefulSet{ + ObjectMeta: objectMeta, + Spec: appsv1.StatefulSetSpec{ + Replicas: &ords.Spec.Replicas, + Selector: &selector, + Template: template, + }, + } 
+ desiredSpecHash = generateSpecHash(desiredWorkload.(*appsv1.StatefulSet).Spec) + desiredWorkload.(*appsv1.StatefulSet).ObjectMeta.Labels[specHashLabel] = desiredSpecHash + case "DaemonSet": + desiredWorkload = &appsv1.DaemonSet{ + ObjectMeta: objectMeta, + Spec: appsv1.DaemonSetSpec{ + Selector: &selector, + Template: template, + }, + } + desiredSpecHash = generateSpecHash(desiredWorkload.(*appsv1.DaemonSet).Spec) + desiredWorkload.(*appsv1.DaemonSet).ObjectMeta.Labels[specHashLabel] = desiredSpecHash + default: + desiredWorkload = &appsv1.Deployment{ + ObjectMeta: objectMeta, + Spec: appsv1.DeploymentSpec{ + Replicas: &ords.Spec.Replicas, + Selector: &selector, + Template: template, + }, + } + desiredSpecHash = generateSpecHash(desiredWorkload.(*appsv1.Deployment).Spec) + desiredWorkload.(*appsv1.Deployment).ObjectMeta.Labels[specHashLabel] = desiredSpecHash + } + + if err := ctrl.SetControllerReference(ords, desiredWorkload, r.Scheme); err != nil { + return err + } + + definedWorkload := reflect.New(reflect.TypeOf(desiredWorkload).Elem()).Interface().(client.Object) + if err = r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, definedWorkload); err != nil { + if apierrors.IsNotFound(err) { + if err := r.Create(ctx, desiredWorkload); err != nil { + condition := metav1.Condition{ + Type: typeAvailableORDS, + Status: metav1.ConditionFalse, + Reason: "Reconciling", + Message: fmt.Sprintf("Failed to create %s for the custom resource (%s): (%s)", kind, ords.Name, err), + } + if statusErr := r.SetStatus(ctx, req, ords, condition); statusErr != nil { + return statusErr + } + return err + } + logr.Info("Created: " + kind) + RestartPods = false + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Create", "Created %s", kind) + + return nil + } else { + return err + } + } + + definedLabelsField := reflect.ValueOf(definedWorkload).Elem().FieldByName("ObjectMeta").FieldByName("Labels") + if definedLabelsField.IsValid() { + specHashValue := 
definedLabelsField.MapIndex(reflect.ValueOf(specHashLabel)) + if specHashValue.IsValid() { + definedSpecHash = specHashValue.Interface().(string) + } else { + return err + } + } + + if desiredSpecHash != definedSpecHash { + logr.Info("Syncing Workload " + kind + " with new configuration") + if err := r.Client.Update(ctx, desiredWorkload); err != nil { + return err + } + RestartPods = true + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Updated %s", kind) + } + + if RestartPods && ords.Spec.ForceRestart { + logr.Info("Cycling: " + kind) + labelsField := reflect.ValueOf(desiredWorkload).Elem().FieldByName("Spec").FieldByName("Template").FieldByName("ObjectMeta").FieldByName("Labels") + if labelsField.IsValid() { + labels := labelsField.Interface().(map[string]string) + labels["configMapChanged"] = time.Now().Format("20060102T150405Z") + labelsField.Set(reflect.ValueOf(labels)) + if err := r.Update(ctx, desiredWorkload); err != nil { + return err + } + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Restart", "Restarted %s", kind) + RestartPods = false + } + } + + return nil +} + +// Service +func (r *OrdsSrvsReconciler) ServiceReconcile(ctx context.Context, ords *dbapi.OrdsSrvs) (err error) { + logr := log.FromContext(ctx).WithName("ServiceReconcile") + + HTTPport := *ords.Spec.GlobalSettings.StandaloneHTTPPort + HTTPSport := *ords.Spec.GlobalSettings.StandaloneHTTPSPort + MongoPort := *ords.Spec.GlobalSettings.MongoPort + + desiredService := r.ServiceDefine(ctx, ords, HTTPport, HTTPSport, MongoPort) + + definedService := &corev1.Service{} + if err = r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, definedService); err != nil { + if apierrors.IsNotFound(err) { + if err := r.Create(ctx, desiredService); err != nil { + return err + } + logr.Info("Created: Service") + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Create", "Service %s Created", ords.Name) + // Requery for comparison + if err := r.Get(ctx, 
types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, definedService); err != nil { + return err + } + } else { + return err + } + } + + desiredPortCount := len(desiredService.Spec.Ports) + definedPortCount := len(definedService.Spec.Ports) + + if desiredPortCount != definedPortCount { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + } + + for _, existingPort := range definedService.Spec.Ports { + if existingPort.Name == serviceHTTPPortName { + if existingPort.Port != HTTPport { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + logr.Info("Updated HTTP Service Port: " + existingPort.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Service HTTP Port %s Updated", existingPort.Name) + } + } + if existingPort.Name == serviceHTTPSPortName { + if existingPort.Port != HTTPSport { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + logr.Info("Updated HTTPS Service Port: " + existingPort.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Service HTTPS Port %s Updated", existingPort.Name) + } + } + if existingPort.Name == serviceMongoPortName { + if existingPort.Port != MongoPort { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + logr.Info("Updated Mongo Service Port: " + existingPort.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Service Mongo Port %s Updated", existingPort.Name) + } + } + } + return nil +} + +/* +************************************************ + - Definers + +************************************************* +*/ +func objectMetaDefine(ords *dbapi.OrdsSrvs, name string) metav1.ObjectMeta { + labels := getLabels(ords.Name) + return metav1.ObjectMeta{ + Name: name, + Namespace: ords.Namespace, + Labels: labels, + } +} + +func selectorDefine(ords *dbapi.OrdsSrvs) metav1.LabelSelector { + labels := getLabels(ords.Name) + return metav1.LabelSelector{ + MatchLabels: labels, + } +} + +func (r 
*OrdsSrvsReconciler) podTemplateSpecDefine(ords *dbapi.OrdsSrvs, ctx context.Context, req ctrl.Request) corev1.PodTemplateSpec { + labels := getLabels(ords.Name) + specVolumes, specVolumeMounts := VolumesDefine(ords) + + envPorts := []corev1.ContainerPort{ + { + ContainerPort: *ords.Spec.GlobalSettings.StandaloneHTTPPort, + Name: targetHTTPPortName, + }, + { + ContainerPort: *ords.Spec.GlobalSettings.StandaloneHTTPSPort, + Name: targetHTTPSPortName, + }, + } + + if ords.Spec.GlobalSettings.MongoEnabled { + mongoPort := corev1.ContainerPort{ + ContainerPort: *ords.Spec.GlobalSettings.MongoPort, + Name: targetMongoPortName, + } + envPorts = append(envPorts, mongoPort) + } + + // Environment From Source + podSpecTemplate := + corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Volumes: specVolumes, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + FSGroup: &[]int64{54321}[0], + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + InitContainers: []corev1.Container{{ + Image: ords.Spec.Image, + Name: ords.Name + "-init", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefine(), + Command: []string{"sh", "-c", ordsSABase + "/bin/init_script.sh"}, + Env: r.envDefine(ords, true, ctx), + VolumeMounts: specVolumeMounts, + }}, + Containers: []corev1.Container{{ + Image: ords.Spec.Image, + Name: ords.Name, + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefine(), + Ports: envPorts, + Command: []string{"/bin/bash", "-c", "ords --config $ORDS_CONFIG serve --apex-images /opt/oracle/apex/$APEX_VER/images --debug"}, + Env: r.envDefine(ords, false, ctx), + VolumeMounts: specVolumeMounts, + }}}, + } + + return podSpecTemplate +} + +// Volumes +func VolumesDefine(ords *dbapi.OrdsSrvs) ([]corev1.Volume, []corev1.VolumeMount) { + // Initialize the slice to hold specifications + var volumes 
[]corev1.Volume + var volumeMounts []corev1.VolumeMount + + // SecretHelper + secretHelperVolume := volumeBuild(ords.Name+"-"+"init-script", "ConfigMap", 0770) + secretHelperVolumeMount := volumeMountBuild(ords.Name+"-"+"init-script", ordsSABase+"/bin", true) + + volumes = append(volumes, secretHelperVolume) + volumeMounts = append(volumeMounts, secretHelperVolumeMount) + + // Build volume specifications for globalSettings + standaloneVolume := volumeBuild("standalone", "EmptyDir") + standaloneVolumeMount := volumeMountBuild("standalone", ordsSABase+"/config/global/standalone/", false) + + globalWalletVolume := volumeBuild("sa-wallet-global", "EmptyDir") + globalWalletVolumeMount := volumeMountBuild("sa-wallet-global", ordsSABase+"/config/global/wallet/", false) + + globalLogVolume := volumeBuild("sa-log-global", "EmptyDir") + globalLogVolumeMount := volumeMountBuild("sa-log-global", ordsSABase+"/log/global/", false) + + globalConfigVolume := volumeBuild(ords.Name+"-"+globalConfigMapName, "ConfigMap") + globalConfigVolumeMount := volumeMountBuild(ords.Name+"-"+globalConfigMapName, ordsSABase+"/config/global/", true) + + globalDocRootVolume := volumeBuild("sa-doc-root", "EmptyDir") + globalDocRootVolumeMount := volumeMountBuild("sa-doc-root", ordsSABase+"/config/global/doc_root/", false) + + volumes = append(volumes, standaloneVolume, globalWalletVolume, globalLogVolume, globalConfigVolume, globalDocRootVolume) + volumeMounts = append(volumeMounts, standaloneVolumeMount, globalWalletVolumeMount, globalLogVolumeMount, globalConfigVolumeMount, globalDocRootVolumeMount) + + if ords.Spec.GlobalSettings.CertSecret != nil { + globalCertVolume := volumeBuild(ords.Spec.GlobalSettings.CertSecret.SecretName, "Secret") + globalCertVolumeMount := volumeMountBuild(ords.Spec.GlobalSettings.CertSecret.SecretName, ordsSABase+"/config/certficate/", true) + + volumes = append(volumes, globalCertVolume) + volumeMounts = append(volumeMounts, globalCertVolumeMount) + } + + // Build 
volume specifications for each pool in poolSettings + definedWalletSecret := make(map[string]bool) + definedTNSSecret := make(map[string]bool) + for i := 0; i < len(ords.Spec.PoolSettings); i++ { + poolName := strings.ToLower(ords.Spec.PoolSettings[i].PoolName) + + poolWalletName := "sa-wallet-" + poolName + poolWalletVolume := volumeBuild(poolWalletName, "EmptyDir") + poolWalletVolumeMount := volumeMountBuild(poolWalletName, ordsSABase+"/config/databases/"+poolName+"/wallet/", false) + + poolConfigName := ords.Name + "-" + poolConfigPreName + poolName + poolConfigVolume := volumeBuild(poolConfigName, "ConfigMap") + poolConfigVolumeMount := volumeMountBuild(poolConfigName, ordsSABase+"/config/databases/"+poolName+"/", true) + + volumes = append(volumes, poolWalletVolume, poolConfigVolume) + volumeMounts = append(volumeMounts, poolWalletVolumeMount, poolConfigVolumeMount) + + if ords.Spec.PoolSettings[i].DBWalletSecret != nil { + walletSecretName := ords.Spec.PoolSettings[i].DBWalletSecret.SecretName + if !definedWalletSecret[walletSecretName] { + // Only create the volume once + poolDBWalletVolume := volumeBuild(walletSecretName, "Secret") + volumes = append(volumes, poolDBWalletVolume) + definedWalletSecret[walletSecretName] = true + } + poolDBWalletVolumeMount := volumeMountBuild(walletSecretName, ordsSABase+"/config/databases/"+poolName+"/network/admin/", true) + volumeMounts = append(volumeMounts, poolDBWalletVolumeMount) + } + + if ords.Spec.PoolSettings[i].TNSAdminSecret != nil { + tnsSecretName := ords.Spec.PoolSettings[i].TNSAdminSecret.SecretName + if !definedTNSSecret[tnsSecretName] { + // Only create the volume once + poolTNSAdminVolume := volumeBuild(tnsSecretName, "Secret") + volumes = append(volumes, poolTNSAdminVolume) + definedTNSSecret[tnsSecretName] = true + } + poolTNSAdminVolumeMount := volumeMountBuild(tnsSecretName, ordsSABase+"/config/databases/"+poolName+"/network/admin/", true) + volumeMounts = append(volumeMounts, poolTNSAdminVolumeMount) 
+ } + } + return volumes, volumeMounts +} + +func volumeMountBuild(name string, path string, readOnly bool) corev1.VolumeMount { + return corev1.VolumeMount{ + Name: name, + MountPath: path, + ReadOnly: readOnly, + } +} + +func volumeBuild(name string, source string, mode ...int32) corev1.Volume { + defaultMode := int32(0660) + if len(mode) > 0 { + defaultMode = mode[0] + } + switch source { + case "ConfigMap": + return corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + case "Secret": + return corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: name, + }, + }, + } + case "EmptyDir": + return corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } + default: + return corev1.Volume{} + } +} + +// Service +func (r *OrdsSrvsReconciler) ServiceDefine(ctx context.Context, ords *dbapi.OrdsSrvs, HTTPport int32, HTTPSport int32, MongoPort int32) *corev1.Service { + labels := getLabels(ords.Name) + + servicePorts := []corev1.ServicePort{ + { + Name: serviceHTTPPortName, + Protocol: corev1.ProtocolTCP, + Port: HTTPport, + TargetPort: intstr.FromString(targetHTTPPortName), + }, + { + Name: serviceHTTPSPortName, + Protocol: corev1.ProtocolTCP, + Port: HTTPSport, + TargetPort: intstr.FromString(targetHTTPSPortName), + }, + } + + if ords.Spec.GlobalSettings.MongoEnabled { + mongoServicePort := corev1.ServicePort{ + Name: serviceMongoPortName, + Protocol: corev1.ProtocolTCP, + Port: MongoPort, + TargetPort: intstr.FromString(targetMongoPortName), + } + servicePorts = append(servicePorts, mongoServicePort) + } + + objectMeta := objectMetaDefine(ords, ords.Name) + def := &corev1.Service{ + ObjectMeta: objectMeta, + Spec: corev1.ServiceSpec{ + Selector: labels, + Ports: servicePorts, 
+ }, + } + + // Set the ownerRef + if err := ctrl.SetControllerReference(ords, def, r.Scheme); err != nil { + return nil + } + return def +} + +func securityContextDefine() *corev1.SecurityContext { + return &corev1.SecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + RunAsUser: &[]int64{54321}[0], + AllowPrivilegeEscalation: &[]bool{false}[0], + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + "ALL", + }, + }, + } +} + +func (r *OrdsSrvsReconciler) envDefine(ords *dbapi.OrdsSrvs, initContainer bool, ctx context.Context) []corev1.EnvVar { + envVarSecrets := []corev1.EnvVar{ + { + Name: "ORDS_CONFIG", + Value: ordsSABase + "/config", + }, + { + Name: "JAVA_TOOL_OPTIONS", + Value: "-Doracle.ml.version_check=false", + }, + } + + // Limitation case for ADB/mTLS/OraOper edge + if len(ords.Spec.PoolSettings) == 1 { + poolName := strings.ToLower(ords.Spec.PoolSettings[0].PoolName) + tnsAdmin := corev1.EnvVar{ + Name: "TNS_ADMIN", + Value: ordsSABase + "/config/databases/" + poolName + "/network/admin/", + } + envVarSecrets = append(envVarSecrets, tnsAdmin) + } + if initContainer { + for i := 0; i < len(ords.Spec.PoolSettings); i++ { + poolName := strings.ReplaceAll(strings.ToLower(ords.Spec.PoolSettings[i].PoolName), "-", "_") + + dbSecret := corev1.EnvVar{ + Name: poolName + "_dbsecret", + Value: r.CommonDecryptWithPrivKey3(ords, ords.Spec.PoolSettings[i].DBSecret.SecretName, ords.Spec.PoolSettings[i].DBSecret.PasswordKey, ctx), + } + + envVarSecrets = append(envVarSecrets, dbSecret) + + if ords.Spec.PoolSettings[i].DBAdminUserSecret.SecretName != "" { + autoUpgradeORDSEnv := corev1.EnvVar{ + Name: poolName + "_autoupgrade_ords", + Value: strconv.FormatBool(ords.Spec.PoolSettings[i].AutoUpgradeORDS), + } + autoUpgradeAPEXEnv := corev1.EnvVar{ + Name: poolName + "_autoupgrade_apex", + Value: strconv.FormatBool(ords.Spec.PoolSettings[i].AutoUpgradeAPEX), + } + + dbAdminUserSecret := corev1.EnvVar{ + Name: poolName + "_dbadminusersecret", + Value: 
r.CommonDecryptWithPrivKey3(ords, ords.Spec.PoolSettings[i].DBAdminUserSecret.SecretName, ords.Spec.PoolSettings[i].DBAdminUserSecret.PasswordKey, ctx), + } + envVarSecrets = append(envVarSecrets, dbAdminUserSecret, autoUpgradeORDSEnv, autoUpgradeAPEXEnv) + } + + if ords.Spec.PoolSettings[i].DBCDBAdminUserSecret.SecretName != "" { + + dbCDBAdminUserSecret := corev1.EnvVar{ + Name: poolName + "_dbcdbadminusersecret", + Value: r.CommonDecryptWithPrivKey3(ords, ords.Spec.PoolSettings[i].DBCDBAdminUserSecret.SecretName, ords.Spec.PoolSettings[i].DBCDBAdminUserSecret.PasswordKey, ctx), + } + + envVarSecrets = append(envVarSecrets, dbCDBAdminUserSecret) + } + } + } + + return envVarSecrets +} + +/************************************************* + * Deletions + **************************************************/ +func (r *OrdsSrvsReconciler) ConfigMapDelete(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, definedPools map[string]bool) (err error) { + // Delete Undefined Pool ConfigMaps + configMapList := &corev1.ConfigMapList{} + if err := r.List(ctx, configMapList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + for _, configMap := range configMapList.Items { + if configMap.Name == ords.Name+"-"+globalConfigMapName || configMap.Name == ords.Name+"-init-script" { + continue + } + if _, exists := definedPools[configMap.Name]; !exists { + if err := r.Delete(ctx, &configMap); err != nil { + return err + } + RestartPods = ords.Spec.ForceRestart + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "ConfigMap %s Deleted", configMap.Name) + } + } + + return nil +} + +func (r *OrdsSrvsReconciler) WorkloadDelete(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, kind string) (err error) { + logr := log.FromContext(ctx).WithName("WorkloadDelete") + + // Get Workloads + deploymentList := 
&appsv1.DeploymentList{} + if err := r.List(ctx, deploymentList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + statefulSetList := &appsv1.StatefulSetList{} + if err := r.List(ctx, statefulSetList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + daemonSetList := &appsv1.DaemonSetList{} + if err := r.List(ctx, daemonSetList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + switch kind { + case "StatefulSet": + for _, deleteDaemonSet := range daemonSetList.Items { + if err := r.Delete(ctx, &deleteDaemonSet); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + for _, deleteDeployment := range deploymentList.Items { + if err := r.Delete(ctx, &deleteDeployment); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + case "DaemonSet": + for _, deleteDeployment := range deploymentList.Items { + if err := r.Delete(ctx, &deleteDeployment); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + for _, deleteStatefulSet := range statefulSetList.Items { + if err := r.Delete(ctx, &deleteStatefulSet); err != nil { + return err + } + logr.Info("Deleted StatefulSet: " + deleteStatefulSet.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + default: + for _, 
deleteStatefulSet := range statefulSetList.Items { + if err := r.Delete(ctx, &deleteStatefulSet); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + for _, deleteDaemonSet := range daemonSetList.Items { + if err := r.Delete(ctx, &deleteDaemonSet); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + } + return nil +} + +/************************************************* + * Helpers + **************************************************/ +func getLabels(name string) map[string]string { + return map[string]string{ + "app.kubernetes.io/instance": name, + controllerLabelKey: controllerLabelVal, + } +} + +func generateSpecHash(spec interface{}) string { + byteArray, err := json.Marshal(spec) + if err != nil { + return "" + } + + hash := sha256.New() + _, err = hash.Write(byteArray) + if err != nil { + return "" + } + + hashBytes := hash.Sum(nil) + hashString := hex.EncodeToString(hashBytes[:8]) + + return hashString +} + +func CommonDecryptWithPrivKey(Key string, Buffer string) (string, error) { + + Debug := 0 + block, _ := pem.Decode([]byte(Key)) + pkcs8PrivateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + fmt.Printf("Failed to parse private key %s \n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", Key) + fmt.Printf("======================================\n") + } + + encString64, err := base64.StdEncoding.DecodeString(string(Buffer)) + if err != nil { + fmt.Printf("Failed to decode encrypted string to base64: %s\n", err.Error()) + return "", err + } + + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", encString64) + fmt.Printf("======================================\n") + } + + decryptedB, err := 
rsa.DecryptPKCS1v15(nil, pkcs8PrivateKey.(*rsa.PrivateKey), encString64) + if err != nil { + fmt.Printf("Failed to decrypt string %s\n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("[%s]\n", string(decryptedB)) + } + return strings.TrimSpace(string(decryptedB)), err + +} + +func (r *OrdsSrvsReconciler) CommonDecryptWithPrivKey3(ords *dbapi.OrdsSrvs, sname string, skey string, ctx context.Context) string { + logr := log.FromContext(ctx).WithName("CommonDecryptWithPrivKey3") + secret_par := &corev1.Secret{} + fmt.Printf("sname: %s\n", sname) + fmt.Printf("skey: %s\n", skey) + err := r.Get(ctx, types.NamespacedName{Name: sname, Namespace: ords.Namespace}, secret_par) + if err != nil { + logr.Error(err, "Cannot read secret "+sname) + return "" + } + encVal := string(secret_par.Data[skey]) + encVal = strings.TrimSpace(encVal) + + secret_key := &corev1.Secret{} + /* get private key */ + if err := r.Get(ctx, types.NamespacedName{Name: ords.Spec.EncPrivKey.SecretName, + Namespace: ords.Namespace}, secret_key); err != nil { + logr.Error(err, "Cannot get private key") + return "" + } + PrvKeyVal := string(secret_key.Data[ords.Spec.EncPrivKey.PasswordKey]) + PrvKeyVal = strings.TrimSpace(PrvKeyVal) + + decVal, err := CommonDecryptWithPrivKey(PrvKeyVal, encVal) + if err != nil { + logr.Error(err, "Failed to decrypt secret") + return "" + } + + logr.Info("Password decryption completed") + + return decVal +} diff --git a/controllers/database/ordssrvs_ordsconfig.go b/controllers/database/ordssrvs_ordsconfig.go new file mode 100644 index 00000000..edb2e0f6 --- /dev/null +++ b/controllers/database/ordssrvs_ordsconfig.go @@ -0,0 +1,258 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
 */

package controllers

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	dbapi "github.com/oracle/oracle-database-operator/apis/database/v4"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// ConfigMapDefine builds the ConfigMap named configMapName for the given
// OrdsSrvs resource and returns it with the controller owner reference set.
// Three shapes are produced, selected by configMapName:
//   - "<name>-init-script": wraps the controller-local /ords_init.sh script;
//   - "<name>-"+globalConfigMapName: the global settings.xml plus a
//     logging.properties for the ORDS debug log;
//   - anything else: a per-pool pool.xml built from PoolSettings[poolIndex].
// Returns nil when the init script cannot be read or when setting the
// owner reference fails.
// NOTE(review): the XML string literals below appear to have had their
// markup stripped in extraction (empty `` literals and bare-value lines
// where <entry key="...">...</entry> tags would be expected) — verify the
// generated settings.xml/pool.xml content against the original source.
func (r *OrdsSrvsReconciler) ConfigMapDefine(ctx context.Context, ords *dbapi.OrdsSrvs, configMapName string, poolIndex int) *corev1.ConfigMap {
	var defData map[string]string
	if configMapName == ords.Name+"-init-script" {
		// Read the file from controller's filesystem
		filePath := "/ords_init.sh"
		scriptData, err := os.ReadFile(filePath)
		if err != nil {
			return nil
		}
		defData = map[string]string{
			"init_script.sh": string(scriptData)}
	} else if configMapName == ords.Name+"-"+globalConfigMapName {
		// GlobalConfigMap: optional fragments included only when the
		// corresponding spec switches/secrets are set.
		var defStandaloneAccessLog string
		if ords.Spec.GlobalSettings.EnableStandaloneAccessLog {
			defStandaloneAccessLog = ` ` + ordsSABase + `/log/global` + "\n"
		}
		var defMongoAccessLog string
		if ords.Spec.GlobalSettings.EnableMongoAccessLog {
			defMongoAccessLog = ` ` + ordsSABase + `/log/global` + "\n"
		}
		var defCert string
		if ords.Spec.GlobalSettings.CertSecret != nil {
			defCert = ` ` + ordsSABase + `/config/certficate/` + ords.Spec.GlobalSettings.CertSecret.Certificate + `` + "\n" +
				` ` + ordsSABase + `/config/certficate/` + ords.Spec.GlobalSettings.CertSecret.CertificateKey + `` + "\n"
		}
		// Each conditionalEntry emits one settings entry only when the
		// corresponding spec field is set; see conditionalEntry below.
		defData = map[string]string{
			"settings.xml": fmt.Sprintf(`` + "\n" +
				`` + "\n" +
				`` + "\n" +
				conditionalEntry("cache.metadata.graphql.expireAfterAccess", ords.Spec.GlobalSettings.CacheMetadataGraphQLExpireAfterAccess) +
				conditionalEntry("cache.metadata.jwks.enabled", ords.Spec.GlobalSettings.CacheMetadataJWKSEnabled) +
				conditionalEntry("cache.metadata.jwks.initialCapacity", ords.Spec.GlobalSettings.CacheMetadataJWKSInitialCapacity) +
				conditionalEntry("cache.metadata.jwks.maximumSize", ords.Spec.GlobalSettings.CacheMetadataJWKSMaximumSize) +
				conditionalEntry("cache.metadata.jwks.expireAfterAccess", ords.Spec.GlobalSettings.CacheMetadataJWKSExpireAfterAccess) +
				conditionalEntry("cache.metadata.jwks.expireAfterWrite", ords.Spec.GlobalSettings.CacheMetadataJWKSExpireAfterWrite) +
				conditionalEntry("database.api.management.services.disabled", ords.Spec.GlobalSettings.DatabaseAPIManagementServicesDisabled) +
				conditionalEntry("db.invalidPoolTimeout", ords.Spec.GlobalSettings.DBInvalidPoolTimeout) +
				conditionalEntry("feature.graphql.max.nesting.depth", ords.Spec.GlobalSettings.FeatureGraphQLMaxNestingDepth) +
				conditionalEntry("request.traceHeaderName", ords.Spec.GlobalSettings.RequestTraceHeaderName) +
				conditionalEntry("security.credentials.attempts", ords.Spec.GlobalSettings.SecurityCredentialsAttempts) +
				conditionalEntry("security.credentials.lock.time", ords.Spec.GlobalSettings.SecurityCredentialsLockTime) +
				conditionalEntry("standalone.context.path", ords.Spec.GlobalSettings.StandaloneContextPath) +
				conditionalEntry("standalone.http.port", ords.Spec.GlobalSettings.StandaloneHTTPPort) +
				conditionalEntry("standalone.https.host", ords.Spec.GlobalSettings.StandaloneHTTPSHost) +
				conditionalEntry("standalone.https.port", ords.Spec.GlobalSettings.StandaloneHTTPSPort) +
				conditionalEntry("standalone.stop.timeout", ords.Spec.GlobalSettings.StandaloneStopTimeout) +
				conditionalEntry("cache.metadata.timeout", ords.Spec.GlobalSettings.CacheMetadataTimeout) +
				conditionalEntry("cache.metadata.enabled", ords.Spec.GlobalSettings.CacheMetadataEnabled) +
				conditionalEntry("database.api.enabled", ords.Spec.GlobalSettings.DatabaseAPIEnabled) +
				conditionalEntry("debug.printDebugToScreen", ords.Spec.GlobalSettings.DebugPrintDebugToScreen) +
				conditionalEntry("error.responseFormat", ords.Spec.GlobalSettings.ErrorResponseFormat) +
				conditionalEntry("icap.port", ords.Spec.GlobalSettings.ICAPPort) +
				conditionalEntry("icap.secure.port", ords.Spec.GlobalSettings.ICAPSecurePort) +
				conditionalEntry("icap.server", ords.Spec.GlobalSettings.ICAPServer) +
				conditionalEntry("log.procedure", ords.Spec.GlobalSettings.LogProcedure) +
				conditionalEntry("mongo.enabled", ords.Spec.GlobalSettings.MongoEnabled) +
				conditionalEntry("mongo.port", ords.Spec.GlobalSettings.MongoPort) +
				conditionalEntry("mongo.idle.timeout", ords.Spec.GlobalSettings.MongoIdleTimeout) +
				conditionalEntry("mongo.op.timeout", ords.Spec.GlobalSettings.MongoOpTimeout) +
				conditionalEntry("security.disableDefaultExclusionList", ords.Spec.GlobalSettings.SecurityDisableDefaultExclusionList) +
				conditionalEntry("security.exclusionList", ords.Spec.GlobalSettings.SecurityExclusionList) +
				conditionalEntry("security.inclusionList", ords.Spec.GlobalSettings.SecurityInclusionList) +
				conditionalEntry("security.maxEntries", ords.Spec.GlobalSettings.SecurityMaxEntries) +
				conditionalEntry("security.verifySSL", ords.Spec.GlobalSettings.SecurityVerifySSL) +
				conditionalEntry("security.httpsHeaderCheck", ords.Spec.GlobalSettings.SecurityHTTPSHeaderCheck) +
				conditionalEntry("security.forceHTTPS", ords.Spec.GlobalSettings.SecurityForceHTTPS) +
				// NOTE(review): "Secuirty" typo below comes from the spec
				// field name itself; only the API type can fix it.
				conditionalEntry("externalSessionTrustedOrigins", ords.Spec.GlobalSettings.SecuirtyExternalSessionTrustedOrigins) +
				` ` + ordsSABase + `/config/global/doc_root/` + "\n" +
				// Dynamic
				defStandaloneAccessLog +
				defMongoAccessLog +
				defCert +
				// Disabled (but not forgotten)
				// conditionalEntry("standalone.binds", ords.Spec.GlobalSettings.StandaloneBinds) +
				// conditionalEntry("error.externalPath", ords.Spec.GlobalSettings.ErrorExternalPath) +
				// conditionalEntry("security.credentials.file ", ords.Spec.GlobalSettings.SecurityCredentialsFile) +
				// conditionalEntry("standalone.static.path", ords.Spec.GlobalSettings.StandaloneStaticPath) +
				// conditionalEntry("standalone.doc.root", ords.Spec.GlobalSettings.StandaloneDocRoot) +
				// conditionalEntry("standalone.static.context.path", ords.Spec.GlobalSettings.StandaloneStaticContextPath) +
				``),
			"logging.properties": fmt.Sprintf(`handlers=java.util.logging.FileHandler` + "\n" +
				`.level=SEVERE` + "\n" +
				`java.util.logging.FileHandler.level=ALL` + "\n" +
				`oracle.dbtools.level=FINEST` + "\n" +
				`java.util.logging.FileHandler.pattern = ` + ordsSABase + `/log/global/debug.log` + "\n" +
				`java.util.logging.FileHandler.formatter = java.util.logging.SimpleFormatter`),
		}
	} else {
		// PoolConfigMap: pool.xml for PoolSettings[poolIndex]. The network
		// path points at the pool's wallet when a DBWalletSecret is set.
		poolName := strings.ToLower(ords.Spec.PoolSettings[poolIndex].PoolName)
		var defDBNetworkPath string
		if ords.Spec.PoolSettings[poolIndex].DBWalletSecret != nil {
			defDBNetworkPath = ` ` + ordsSABase + `/config/databases/` + poolName + `/network/admin/` + ords.Spec.PoolSettings[poolIndex].DBWalletSecret.WalletName + `` + "\n" +
				conditionalEntry("db.wallet.zip.service", strings.ToUpper(ords.Spec.PoolSettings[poolIndex].DBWalletZipService)) + "\n"
		} else {
			defDBNetworkPath = ` ` + ordsSABase + `/config/databases/` + poolName + `/network/admin/` + "\n"
		}
		defData = map[string]string{
			"pool.xml": fmt.Sprintf(`` + "\n" +
				`` + "\n" +
				`` + "\n" +
				` ` + ords.Spec.PoolSettings[poolIndex].DBUsername + `` + "\n" +
				conditionalEntry("db.adminUser", ords.Spec.PoolSettings[poolIndex].DBAdminUser) +
				conditionalEntry("db.cdb.adminUser", ords.Spec.PoolSettings[poolIndex].DBCDBAdminUser) +
				conditionalEntry("apex.security.administrator.roles", ords.Spec.PoolSettings[poolIndex].ApexSecurityAdministratorRoles) +
				conditionalEntry("apex.security.user.roles", ords.Spec.PoolSettings[poolIndex].ApexSecurityUserRoles) +
				conditionalEntry("db.credentialsSource", ords.Spec.PoolSettings[poolIndex].DBCredentialsSource) +
				conditionalEntry("db.poolDestroyTimeout", ords.Spec.PoolSettings[poolIndex].DBPoolDestroyTimeout) +
				conditionalEntry("debug.trackResources", ords.Spec.PoolSettings[poolIndex].DebugTrackResources) +
				conditionalEntry("feature.openservicebroker.exclude", ords.Spec.PoolSettings[poolIndex].FeatureOpenservicebrokerExclude) +
				conditionalEntry("feature.sdw", ords.Spec.PoolSettings[poolIndex].FeatureSDW) +
				conditionalEntry("http.cookie.filter", ords.Spec.PoolSettings[poolIndex].HttpCookieFilter) +
				conditionalEntry("jdbc.auth.admin.role", ords.Spec.PoolSettings[poolIndex].JDBCAuthAdminRole) +
				conditionalEntry("jdbc.cleanup.mode", ords.Spec.PoolSettings[poolIndex].JDBCCleanupMode) +
				conditionalEntry("owa.trace.sql", ords.Spec.PoolSettings[poolIndex].OwaTraceSql) +
				conditionalEntry("plsql.gateway.mode", ords.Spec.PoolSettings[poolIndex].PlsqlGatewayMode) +
				conditionalEntry("security.jwt.profile.enabled", ords.Spec.PoolSettings[poolIndex].SecurityJWTProfileEnabled) +
				conditionalEntry("security.jwks.size", ords.Spec.PoolSettings[poolIndex].SecurityJWKSSize) +
				conditionalEntry("security.jwks.connection.timeout", ords.Spec.PoolSettings[poolIndex].SecurityJWKSConnectionTimeout) +
				conditionalEntry("security.jwks.read.timeout", ords.Spec.PoolSettings[poolIndex].SecurityJWKSReadTimeout) +
				conditionalEntry("security.jwks.refresh.interval", ords.Spec.PoolSettings[poolIndex].SecurityJWKSRefreshInterval) +
				conditionalEntry("security.jwt.allowed.skew", ords.Spec.PoolSettings[poolIndex].SecurityJWTAllowedSkew) +
				conditionalEntry("security.jwt.allowed.age", ords.Spec.PoolSettings[poolIndex].SecurityJWTAllowedAge) +
				conditionalEntry("db.connectionType", ords.Spec.PoolSettings[poolIndex].DBConnectionType) +
				conditionalEntry("db.customURL", ords.Spec.PoolSettings[poolIndex].DBCustomURL) +
				conditionalEntry("db.hostname", ords.Spec.PoolSettings[poolIndex].DBHostname) +
				conditionalEntry("db.port", ords.Spec.PoolSettings[poolIndex].DBPort) +
				conditionalEntry("db.servicename", ords.Spec.PoolSettings[poolIndex].DBServicename) +
				conditionalEntry("db.sid", ords.Spec.PoolSettings[poolIndex].DBSid) +
				conditionalEntry("db.tnsAliasName", ords.Spec.PoolSettings[poolIndex].DBTnsAliasName) +
				conditionalEntry("jdbc.DriverType", ords.Spec.PoolSettings[poolIndex].JDBCDriverType) +
				conditionalEntry("jdbc.InactivityTimeout", ords.Spec.PoolSettings[poolIndex].JDBCInactivityTimeout) +
				conditionalEntry("jdbc.InitialLimit", ords.Spec.PoolSettings[poolIndex].JDBCInitialLimit) +
				conditionalEntry("jdbc.MaxConnectionReuseCount", ords.Spec.PoolSettings[poolIndex].JDBCMaxConnectionReuseCount) +
				conditionalEntry("jdbc.MaxLimit", ords.Spec.PoolSettings[poolIndex].JDBCMaxLimit) +
				conditionalEntry("jdbc.auth.enabled", ords.Spec.PoolSettings[poolIndex].JDBCAuthEnabled) +
				conditionalEntry("jdbc.MaxStatementsLimit", ords.Spec.PoolSettings[poolIndex].JDBCMaxStatementsLimit) +
				conditionalEntry("jdbc.MinLimit", ords.Spec.PoolSettings[poolIndex].JDBCMinLimit) +
				conditionalEntry("jdbc.statementTimeout", ords.Spec.PoolSettings[poolIndex].JDBCStatementTimeout) +
				conditionalEntry("jdbc.MaxConnectionReuseTime", ords.Spec.PoolSettings[poolIndex].JDBCMaxConnectionReuseTime) +
				conditionalEntry("jdbc.SecondsToTrustIdleConnection", ords.Spec.PoolSettings[poolIndex].JDBCSecondsToTrustIdleConnection) +
				conditionalEntry("misc.defaultPage", ords.Spec.PoolSettings[poolIndex].MiscDefaultPage) +
				conditionalEntry("misc.pagination.maxRows", ords.Spec.PoolSettings[poolIndex].MiscPaginationMaxRows) +
				conditionalEntry("procedure.postProcess", ords.Spec.PoolSettings[poolIndex].ProcedurePostProcess) +
				conditionalEntry("procedure.preProcess", ords.Spec.PoolSettings[poolIndex].ProcedurePreProcess) +
				conditionalEntry("procedure.rest.preHook", ords.Spec.PoolSettings[poolIndex].ProcedureRestPreHook) +
				conditionalEntry("security.requestAuthenticationFunction", ords.Spec.PoolSettings[poolIndex].SecurityRequestAuthenticationFunction) +
				conditionalEntry("security.requestValidationFunction", ords.Spec.PoolSettings[poolIndex].SecurityRequestValidationFunction) +
				conditionalEntry("soda.defaultLimit", ords.Spec.PoolSettings[poolIndex].SODADefaultLimit) +
				conditionalEntry("soda.maxLimit", ords.Spec.PoolSettings[poolIndex].SODAMaxLimit) +
				conditionalEntry("restEnabledSql.active", ords.Spec.PoolSettings[poolIndex].RestEnabledSqlActive) +
				defDBNetworkPath +
				// Disabled (but not forgotten)
				// conditionalEntry("autoupgrade.api.aulocation", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPIAulocation) +
				// conditionalEntry("autoupgrade.api.enabled", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPIEnabled) +
				// conditionalEntry("autoupgrade.api.jvmlocation", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPIJvmlocation) +
				// conditionalEntry("autoupgrade.api.loglocation", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPILoglocation) +
				// conditionalEntry("db.serviceNameSuffix", ords.Spec.PoolSettings[poolIndex].DBServiceNameSuffix) +
				``),
		}
	}

	objectMeta := objectMetaDefine(ords, configMapName)
	def := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ConfigMap",
			APIVersion: "v1",
		},
		ObjectMeta: objectMeta,
		Data:       defData,
	}

	// Set the ownerRef
	if err := ctrl.SetControllerReference(ords, def, r.Scheme); err != nil {
		return nil
	}
	return def
}

// conditionalEntry renders one settings entry line for key when value is
// set (non-nil pointer, or non-empty string) and returns "" otherwise, so
// unset spec fields are omitted from the generated XML entirely.
// NOTE(review): every Sprintf below passes both key and the value but the
// visible format string carries only a single verb — consistent with the
// <entry key="%s">...</entry> markup having been stripped from this text
// (as written, `go vet` would flag an extra argument). Restore the markup
// before relying on the generated settings files.
func conditionalEntry(key string, value interface{}) string {
	switch v := value.(type) {
	case nil:
		return ""
	case string:
		if v != "" {
			return fmt.Sprintf(` %s`+"\n", key, v)
		}
	case *int32:
		if v != nil {
			return fmt.Sprintf(` %d`+"\n", key, *v)
		}
	case *bool:
		if v != nil {
			return fmt.Sprintf(` %v`+"\n", key, *v)
		}
	case *time.Duration:
		if v != nil {
			return fmt.Sprintf(` %v`+"\n", key, *v)
		}
	default:
		return fmt.Sprintf(` %v`+"\n", key, v)
	}
	return ""
}
diff --git a/controllers/database/pdb_controller.go b/controllers/database/pdb_controller.go
new file mode 100644
index 00000000..a2ca0f85
--- /dev/null
+++ b/controllers/database/pdb_controller.go
@@ -0,0 +1,1631 @@
/*
** Copyright (c) 2022 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
 */

package controllers

import (
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"

	//"encoding/pem"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"

	dbapi "github.com/oracle/oracle-database-operator/apis/database/v4"
	lrcommons "github.com/oracle/oracle-database-operator/commons/multitenant/lrest"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"

	//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// PDBReconciler reconciles a PDB object
type PDBReconciler struct {
	client.Client
	Log      logr.Logger
	Scheme   *runtime.Scheme
	Interval time.Duration
	Recorder record.EventRecorder
}

// controllers bundles the PDB and CDB reconcilers.
type controllers struct {
	Pdbc PDBReconciler
	Cdbc CDBReconciler
}

// RESTSQLCollection models the JSON document returned by the ORDS
// REST-enabled SQL endpoint: an environment header plus one item per
// executed statement.
type RESTSQLCollection struct {
	Env struct {
		DefaultTimeZone string `json:"defaultTimeZone,omitempty"`
	} `json:"env"`
	Items []SQLItem `json:"items"`
}

// SQLItem is a single statement result within a RESTSQLCollection,
// including any Oracle error code/position/details for that statement.
type SQLItem struct {
	StatementId  int      `json:"statementId,omitempty"`
	Response     []string `json:"response"`
	ErrorCode    int      `json:"errorCode,omitempty"`
	ErrorLine    int      `json:"errorLine,omitempty"`
	ErrorColumn  int      `json:"errorColumn,omitempty"`
	ErrorDetails string   `json:"errorDetails,omitempty"`
	Result       int      `json:"result,omitempty"`
}

// ORDSError is the error document ORDS returns on non-2xx responses.
type ORDSError struct {
	Code     string `json:"code,omitempty"`
	Message  string `json:"message,omitempty"`
	Type     string `json:"type,omitempty"`
	Instance string `json:"instance,omitempty"`
}

// Phase labels for pdb.Status.Phase, selected from pdb.Spec.Action in
// validatePhase and driven through the switch in Reconcile.
var (
	pdbPhaseCreate = "Creating"
	pdbPhasePlug   = "Plugging"
	pdbPhaseUnplug =
		"Unplugging"
	pdbPhaseClone  = "Cloning"
	pdbPhaseFinish = "Finishing"
	pdbPhaseReady  = "Ready"
	pdbPhaseDelete = "Deleting"
	pdbPhaseModify = "Modifying"
	pdbPhaseMap    = "Mapping"
	pdbPhaseStatus = "CheckingState"
	pdbPhaseFail   = "Failed"
)

// PDBFinalizer is the finalizer added to PDB resources to gate deletion.
const PDBFinalizer = "database.oracle.com/PDBfinalizer"
const ONE = 1
const ZERO = 0

// NOTE(review): package-level mutable state shared across all reconciles
// (floodcontrol is written from callAPI). Not safe if the controller ever
// runs with MaxConcurrentReconciles > 1 — confirm before enabling that.
var tdePassword string
var tdeSecret string
var floodcontrol bool = false
var assertivePdbDeletion bool = false /* Global variable for assertive pdb deletion */

//+kubebuilder:rbac:groups=database.oracle.com,resources=pdbs,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=database.oracle.com,resources=pdbs/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=database.oracle.com,resources=pdbs/finalizers,verbs=get;create;update;patch;delete

// +kubebuilder:rbac:groups=core,resources=pods;pods/log;pods/exec;secrets;containers;services;events;configmaps;namespaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods/exec,verbs=create
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups='',resources=statefulsets/finalizers,verbs=get;list;watch;create;update;patch;delete

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the PDB object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile

func (r *PDBReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("multitenantoperator", req.NamespacedName)
	log.Info("Reconcile requested")

	// requeueY re-queues after the configured interval; requeueN stops.
	reconcilePeriod := r.Interval * time.Second
	requeueY := ctrl.Result{Requeue: true, RequeueAfter: reconcilePeriod}
	requeueN := ctrl.Result{}

	var err error
	pdb := &dbapi.PDB{}

	// Execute for every reconcile: once the phase has reached Ready, flip
	// Status to true and push the status subresource update.
	defer func() {
		//log.Info("DEFER PDB", "Name", pdb.Name, "Phase", pdb.Status.Phase, "Status", strconv.FormatBool(pdb.Status.Status))
		if !pdb.Status.Status {
			if pdb.Status.Phase == pdbPhaseReady {
				pdb.Status.Status = true
			}
			if err := r.Status().Update(ctx, pdb); err != nil {
				log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error())
			}
		}
	}()

	err = r.Client.Get(context.TODO(), req.NamespacedName, pdb)
	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("PDB Resource Not found", "Name", pdb.Name)
			// Request object not found, could have been deleted after reconcile req.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			pdb.Status.Status = true
			return requeueN, nil
		}
		// Error reading the object - requeue the req.
		return requeueY, err
	}

	// Finalizer section
	err = r.managePDBDeletion2(ctx, req, pdb)
	if err != nil {
		log.Info("managePDBDeletion2 Error Deleting resource ")
		return requeueY, nil
	}

	// Check for Duplicate PDB
	if !pdb.Status.Status {
		err = r.checkDuplicatePDB(ctx, req, pdb)
		if err != nil {
			return requeueN, nil
		}
	}

	action := strings.ToUpper(pdb.Spec.Action)

	if pdb.Status.Phase == pdbPhaseReady {
		//log.Info("PDB:", "Name", pdb.Name, "Phase", pdb.Status.Phase, "Status", strconv.FormatBool(pdb.Status.Status))
		// A new/changed action (or MODIFY/STATUS, which are repeatable)
		// drops Status back to false so the phase machine below runs.
		if (pdb.Status.Action != "") && (action == "MODIFY" || action == "STATUS" || pdb.Status.Action != action) {
			pdb.Status.Status = false
		} else {
			err = r.getPDBState(ctx, req, pdb)
			if err != nil {
				pdb.Status.Phase = pdbPhaseFail
			} else {
				pdb.Status.Phase = pdbPhaseReady
				pdb.Status.Msg = "Success"
			}
			// NOTE(review): error from this status update is discarded.
			r.Status().Update(ctx, pdb)
		}
	}

	if !pdb.Status.Status {
		// Map Spec.Action onto Status.Phase, then dispatch on the phase.
		r.validatePhase(ctx, req, pdb)
		phase := pdb.Status.Phase
		log.Info("PDB:", "Name", pdb.Name, "Phase", phase, "Status", strconv.FormatBool(pdb.Status.Status))

		switch phase {
		case pdbPhaseCreate:
			err = r.createPDB(ctx, req, pdb)
		case pdbPhaseClone:
			err = r.clonePDB(ctx, req, pdb)
		case pdbPhasePlug:
			err = r.plugPDB(ctx, req, pdb)
		case pdbPhaseUnplug:
			err = r.unplugPDB(ctx, req, pdb)
		case pdbPhaseModify:
			err = r.modifyPDB(ctx, req, pdb)
		case pdbPhaseDelete:
			err = r.deletePDB(ctx, req, pdb)
		case pdbPhaseStatus:
			err = r.getPDBState(ctx, req, pdb)
		case pdbPhaseMap:
			err = r.mapPDB(ctx, req, pdb)
		case pdbPhaseFail:
			err = r.mapPDB(ctx, req, pdb)
		default:
			log.Info("DEFAULT:", "Name", pdb.Name, "Phase", phase, "Status", strconv.FormatBool(pdb.Status.Status))
			return requeueN, nil
		}
		pdb.Status.Action = strings.ToUpper(pdb.Spec.Action)
		if err != nil {
			pdb.Status.Phase = pdbPhaseFail
		} else {
			pdb.Status.Phase = pdbPhaseReady
			pdb.Status.Msg = "Success"
		}
	}

	log.Info("Reconcile completed")
	// NOTE(review): requeueY is returned unconditionally here, so the
	// resource is re-reconciled every Interval even when Ready — confirm
	// this polling behavior is intended.
	return requeueY, nil
}

/*
************************************************
  - Validate the PDB Spec
    /***********************************************
*/
// validatePhase maps pdb.Spec.Action (upper-cased) onto the corresponding
// Status.Phase label; unknown actions leave the phase unchanged.
func (r *PDBReconciler) validatePhase(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) {

	log := r.Log.WithValues("validatePhase", req.NamespacedName)

	action := strings.ToUpper(pdb.Spec.Action)

	log.Info("Validating PDB phase for: "+pdb.Name, "Action", action)

	switch action {
	case "CREATE":
		pdb.Status.Phase = pdbPhaseCreate
	case "CLONE":
		pdb.Status.Phase = pdbPhaseClone
	case "PLUG":
		pdb.Status.Phase = pdbPhasePlug
	case "UNPLUG":
		pdb.Status.Phase = pdbPhaseUnplug
	case "MODIFY":
		pdb.Status.Phase = pdbPhaseModify
	case "DELETE":
		pdb.Status.Phase = pdbPhaseDelete
	case "STATUS":
		pdb.Status.Phase = pdbPhaseStatus
	case "MAP":
		pdb.Status.Phase = pdbPhaseMap
	}

	log.Info("Validation complete")
}

/*
***************************************************************
  - Check for Duplicate PDB. Same PDB name on the same CDB resource.
    /**************************************************************
*/
// checkDuplicatePDB fails the resource when another PDB CR in the CDB's
// namespace already targets the same PDB name on the same CDB resource.
func (r *PDBReconciler) checkDuplicatePDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error {

	// Name of the CDB CR that holds the ORDS container
	cdbResName := pdb.Spec.CDBResName

	log := r.Log.WithValues("checkDuplicatePDB", pdb.Spec.CDBNamespace)
	pdbResName := pdb.Spec.PDBName

	pdbList := &dbapi.PDBList{}

	listOpts := []client.ListOption{client.InNamespace(pdb.Spec.CDBNamespace), client.MatchingFields{"spec.pdbName": pdbResName}}

	// List retrieves list of objects for a given namespace and list options.
	err := r.List(ctx, pdbList, listOpts...)
if err != nil {
		log.Info("Failed to list pdbs", "Namespace", pdb.Spec.CDBNamespace, "Error", err)
		return err
	}

	if len(pdbList.Items) == 0 {
		log.Info("No pdbs found for PDBName: "+pdbResName, "CDBResName", cdbResName)
		return nil
	}

	for _, p := range pdbList.Items {
		log.Info("Found PDB: " + p.Name)
		// A different CR targeting the same PDB name on the same CDB is a duplicate.
		if (p.Name != pdb.Name) && (p.Spec.CDBResName == cdbResName) {
			log.Info("Duplicate PDB found")
			pdb.Status.Msg = "PDB Resource already exists"
			pdb.Status.Status = false
			pdb.Status.Phase = pdbPhaseFail
			// NOTE(review): error string is capitalized, against Go
			// convention (staticcheck ST1005).
			return errors.New("Duplicate PDB found")
		}
	}
	return nil
}

/*
***************************************************************
  - Get the Custom Resource for the CDB mentioned in the PDB Spec
    /**************************************************************
*/
// getCDBResource fetches the CDB custom resource named by
// pdb.Spec.CDBResName in pdb.Spec.CDBNamespace; on failure it records the
// problem in pdb.Status.Msg and pushes a status update.
func (r *PDBReconciler) getCDBResource(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) (dbapi.CDB, error) {

	log := r.Log.WithValues("getCDBResource", req.NamespacedName)

	var cdb dbapi.CDB // CDB CR corresponding to the CDB name specified in the PDB spec

	// Name of the CDB CR that holds the ORDS container
	cdbResName := pdb.Spec.CDBResName
	cdbNamespace := pdb.Spec.CDBNamespace

	// Get CDB CR corresponding to the CDB name specified in the PDB spec
	err := r.Get(context.Background(), client.ObjectKey{
		Namespace: cdbNamespace,
		Name:      cdbResName,
	}, &cdb)

	if err != nil {
		log.Info("Failed to get CRD for CDB", "Name", cdbResName, "Namespace", pdb.Spec.CDBNamespace, "Error", err.Error())
		pdb.Status.Msg = "Unable to get CRD for CDB : " + cdbResName
		r.Status().Update(ctx, pdb)
		return cdb, err
	}

	log.Info("Found CR for CDB", "Name", cdbResName, "CR Name", cdb.Name)
	return cdb, nil
}

/*
***************************************************************
  - Get the ORDS Pod for the CDB mentioned in the PDB Spec
    /**************************************************************
*/
// getORDSPod fetches the ORDS pod ("<cdbResName>-ords") that fronts the
// CDB referenced by the PDB spec.
func (r *PDBReconciler) getORDSPod(ctx context.Context,
req ctrl.Request, pdb *dbapi.PDB) (corev1.Pod, error) {

	log := r.Log.WithValues("getORDSPod", req.NamespacedName)

	var cdbPod corev1.Pod // ORDS Pod container with connection to the concerned CDB

	// Name of the CDB CR that holds the ORDS container
	cdbResName := pdb.Spec.CDBResName

	// Get ORDS Pod associated with the CDB Name specified in the PDB Spec
	err := r.Get(context.Background(), client.ObjectKey{
		Namespace: pdb.Spec.CDBNamespace,
		Name:      cdbResName + "-ords",
	}, &cdbPod)

	if err != nil {
		log.Info("Failed to get Pod for CDB", "Name", cdbResName, "Namespace", pdb.Spec.CDBNamespace, "Error", err.Error())
		pdb.Status.Msg = "Unable to get ORDS Pod for CDB : " + cdbResName
		return cdbPod, err
	}

	log.Info("Found ORDS Pod for CDB", "Name", cdbResName, "Pod Name", cdbPod.Name, "ORDS Container hostname", cdbPod.Spec.Hostname)
	return cdbPod, nil
}

/*
************************************************
  - Get Secret Key for a Secret Name
    /***********************************************
*/
// getSecret returns the value stored under keyName in Secret secretName
// (in the PDB's namespace). A not-found secret is surfaced both as an
// error and in pdb.Status.Msg.
func (r *PDBReconciler) getSecret(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB, secretName string, keyName string) (string, error) {

	log := r.Log.WithValues("getSecret", req.NamespacedName)

	secret := &corev1.Secret{}
	err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: pdb.Namespace}, secret)
	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("Secret not found:" + secretName)
			pdb.Status.Msg = "Secret not found:" + secretName
			return "", err
		}
		log.Error(err, "Unable to get the secret.")
		return "", err
	}

	return string(secret.Data[keyName]), nil
}

/*
************************************************
  - Issue a REST API Call to the ORDS container

***********************************************
*/
// callAPI performs an authenticated, mutual-TLS HTTP request (GET, or a
// JSON-encoded payload for other actions) against the ORDS container at
// url and returns the response body. TLS client key/cert/CA and the web
// server credentials are all read from the Secrets referenced in the PDB
// spec. ORDS/Oracle errors found in the response are converted to errors
// and recorded as events on the PDB resource.
func (r *PDBReconciler) callAPI(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB, url string, payload map[string]string, action string) (string, error) {
	log :=
r.Log.WithValues("callAPI", req.NamespacedName)

	var err error

	secret := &corev1.Secret{}

	// Client TLS private key for mutual TLS with the ORDS pod.
	err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsKey.Secret.SecretName, Namespace: pdb.Namespace}, secret)

	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("Secret not found:" + pdb.Spec.PDBTlsKey.Secret.SecretName)
			return "", err
		}
		log.Error(err, "Unable to get the secret.")
		return "", err
	}
	rsaKeyPEM := secret.Data[pdb.Spec.PDBTlsKey.Secret.Key]

	// Client TLS certificate.
	err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsCrt.Secret.SecretName, Namespace: pdb.Namespace}, secret)

	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("Secret not found:" + pdb.Spec.PDBTlsCrt.Secret.SecretName)
			return "", err
		}
		log.Error(err, "Unable to get the secret.")
		return "", err
	}

	rsaCertPEM := secret.Data[pdb.Spec.PDBTlsCrt.Secret.Key]

	// CA certificate used to verify the ORDS server.
	err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsCat.Secret.SecretName, Namespace: pdb.Namespace}, secret)

	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("Secret not found:" + pdb.Spec.PDBTlsCat.Secret.SecretName)
			return "", err
		}
		log.Error(err, "Unable to get the secret.")
		return "", err
	}

	caCert := secret.Data[pdb.Spec.PDBTlsCat.Secret.Key]

	certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM))
	if err != nil {
		pdb.Status.Msg = "Error tls.X509KeyPair"
		return "", err
	}

	caCertPool := x509.NewCertPool()
	caCertPool.AppendCertsFromPEM(caCert)

	tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, RootCAs: caCertPool}

	tr := &http.Transport{TLSClientConfig: tlsConf}

	// NOTE(review): client has no Timeout set; a hung ORDS pod would block
	// the reconcile indefinitely.
	httpclient := &http.Client{Transport: tr}

	log.Info("Issuing REST call", "URL", url, "Action", action)

	/*
		cdb, err := r.getCDBResource(ctx, req, pdb)
		if err != nil {
			return "", err
		}
	*/

	// Basic-auth credentials for the ORDS web server.
	err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.WebServerUsr.Secret.SecretName, Namespace: pdb.Namespace}, secret)
	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("Secret not found:" + pdb.Spec.WebServerUsr.Secret.SecretName)
			return "", err
		}
		log.Error(err, "Unable to get the secret.")
		return "", err
	}

	webUser := string(secret.Data[pdb.Spec.WebServerUsr.Secret.Key])
	webUser = strings.TrimSpace(webUser)

	secret = &corev1.Secret{}
	err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.WebServerPwd.Secret.SecretName, Namespace: pdb.Namespace}, secret)
	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("Secret not found:" + pdb.Spec.WebServerPwd.Secret.SecretName)
			return "", err
		}
		log.Error(err, "Unable to get the secret.")
		return "", err
	}
	webUserPwd := string(secret.Data[pdb.Spec.WebServerPwd.Secret.Key])
	webUserPwd = strings.TrimSpace(webUserPwd)

	var httpreq *http.Request
	if action == "GET" {
		httpreq, err = http.NewRequest(action, url, nil)
	} else {
		jsonValue, _ := json.Marshal(payload)
		httpreq, err = http.NewRequest(action, url, bytes.NewBuffer(jsonValue))
	}

	if err != nil {
		log.Info("Unable to create HTTP Request for PDB : "+pdb.Name, "err", err.Error())
		return "", err
	}

	httpreq.Header.Add("Accept", "application/json")
	httpreq.Header.Add("Content-Type", "application/json")
	httpreq.SetBasicAuth(webUser, webUserPwd)

	resp, err := httpclient.Do(httpreq)
	if err != nil {
		errmsg := err.Error()
		log.Error(err, "Failed - Could not connect to ORDS Pod", "err", err.Error())
		pdb.Status.Msg = "Error: Could not connect to ORDS Pod"
		r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSError", errmsg)
		return "", err
	}

	// NOTE(review): a Warning-type event for a successful call — confirm
	// EventTypeNormal was not intended here.
	r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "Done", pdb.Spec.CDBResName)
	// NOTE(review): this branch returns before the deferred Close below is
	// registered, so resp.Body is leaked on every non-OK response.
	if resp.StatusCode != http.StatusOK {
		bb, _ := ioutil.ReadAll(resp.Body)

		if resp.StatusCode == 404 {
			pdb.Status.ConnString = ""
			pdb.Status.Msg = pdb.Spec.PDBName + " not found"

		} else {
			// floodcontrol suppresses repeated logging/eventing of the same
			// failure across reconciles (package-level flag).
			if floodcontrol == false {
				pdb.Status.Msg = "ORDS Error - HTTP Status Code:" + strconv.Itoa(resp.StatusCode)
			}
		}

		if floodcontrol == false {
			log.Info("ORDS Error - HTTP Status Code :"+strconv.Itoa(resp.StatusCode), "Err", string(bb))
		}

		var apiErr ORDSError
		json.Unmarshal([]byte(bb), &apiErr)
		if floodcontrol == false {
			r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSError", "Failed: %s", apiErr.Message)
		}
		//fmt.Printf("%+v", apiErr)
		//fmt.Println(string(bb))
		floodcontrol = true
		return "", errors.New("ORDS Error")
	}
	floodcontrol = false

	defer resp.Body.Close()

	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Print(err.Error())
	}
	respData := string(bodyBytes)
	//fmt.Println(string(bodyBytes))

	// Scan per-statement results for Oracle errors; the first error message
	// is surfaced in Status.Msg and each one is recorded as an event.
	var apiResponse RESTSQLCollection
	json.Unmarshal([]byte(bodyBytes), &apiResponse)
	//fmt.Printf("%#v", apiResponse)
	//fmt.Printf("%+v", apiResponse)

	errFound := false
	for _, sqlItem := range apiResponse.Items {
		if sqlItem.ErrorDetails != "" {
			log.Info("ORDS Error - Oracle Error Code :" + strconv.Itoa(sqlItem.ErrorCode))
			if !errFound {
				pdb.Status.Msg = sqlItem.ErrorDetails
			}
			r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "OraError", "%s", sqlItem.ErrorDetails)
			errFound = true
		}
	}

	if errFound {
		return "", errors.New("Oracle Error")
	}

	return respData, nil
}

/*
************************************************
  - Create a PDB

***********************************************
*/
func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error {

	log := r.Log.WithValues("createPDB", req.NamespacedName)

	var err error
	// NOTE(review): these locals shadow the package-level tdePassword and
	// tdeSecret variables declared above — confirm which is intended.
	var tdePassword string
	var tdeSecret string

	cdb, err := r.getCDBResource(ctx, req, pdb)
	if err != nil {
		return err
	}

	/*** BEGIN GET ENCPASS ***/
	secret := &corev1.Secret{}

	err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.AdminName.Secret.SecretName, Namespace: pdb.Namespace}, secret)
	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("Secret not found:"
+ pdb.Spec.AdminName.Secret.SecretName) + return err + } + log.Error(err, "Unable to get the secret.") + return err + } + pdbAdminNameEnc := string(secret.Data[pdb.Spec.AdminName.Secret.Key]) + pdbAdminNameEnc = strings.TrimSpace(pdbAdminNameEnc) + + err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBPriKey.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBPriKey.Secret.SecretName) + return err + } + log.Error(err, "Unable to get the secret.") + return err + } + privKey := string(secret.Data[pdb.Spec.PDBPriKey.Secret.Key]) + pdbAdminName, err := lrcommons.CommonDecryptWithPrivKey(privKey, pdbAdminNameEnc, req) + + // Get Web Server User Password + secret = &corev1.Secret{} + err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.AdminPwd.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.AdminPwd.Secret.SecretName) + return err + } + log.Error(err, "Unable to get the secret.") + return err + } + pdbAdminPwdEnc := string(secret.Data[pdb.Spec.AdminPwd.Secret.Key]) + pdbAdminPwdEnc = strings.TrimSpace(pdbAdminPwdEnc) + pdbAdminPwd, err := lrcommons.CommonDecryptWithPrivKey(privKey, pdbAdminPwdEnc, req) + pdbAdminName = strings.TrimSuffix(pdbAdminName, "\n") + pdbAdminPwd = strings.TrimSuffix(pdbAdminPwd, "\n") + /*** END GET ENCPASS ***/ + + log.Info("====================> " + pdbAdminName + ":" + pdbAdminPwd) + /* Prevent creating an existing pdb */ + err = r.getPDBState(ctx, req, pdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Check PDB not existence completed", "PDB Name", pdb.Spec.PDBName) + } + + } else { + log.Info("Database already exists ", "PDB Name", pdb.Spec.PDBName) + return nil + } + + values := map[string]string{ + "method": "CREATE", + "pdb_name": pdb.Spec.PDBName, + "adminName": pdbAdminName, + "adminPwd": pdbAdminPwd, + 
"fileNameConversions": pdb.Spec.FileNameConversions, + "reuseTempFile": strconv.FormatBool(*(pdb.Spec.ReuseTempFile)), + "unlimitedStorage": strconv.FormatBool(*(pdb.Spec.UnlimitedStorage)), + "totalSize": pdb.Spec.TotalSize, + "tempSize": pdb.Spec.TempSize, + "getScript": strconv.FormatBool(*(pdb.Spec.GetScript))} + + if *(pdb.Spec.TDEImport) { + tdePassword, err = r.getSecret(ctx, req, pdb, pdb.Spec.TDEPassword.Secret.SecretName, pdb.Spec.TDEPassword.Secret.Key) + if err != nil { + return err + } + tdeSecret, err = r.getSecret(ctx, req, pdb, pdb.Spec.TDESecret.Secret.SecretName, pdb.Spec.TDESecret.Secret.Key) + if err != nil { + return err + } + + tdeSecret = tdeSecret[:len(tdeSecret)-1] + tdePassword = tdeSecret[:len(tdePassword)-1] + values["tdePassword"] = tdePassword + values["tdeKeystorePath"] = pdb.Spec.TDEKeystorePath + values["tdeSecret"] = tdeSecret + } + + url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + + pdb.Status.TotalSize = pdb.Spec.TotalSize + pdb.Status.Phase = pdbPhaseCreate + pdb.Status.Msg = "Waiting for PDB to be created" + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") + if err != nil { + log.Error(err, "callAPI error", "err", err.Error()) + return err + } + + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Created", "PDB '%s' created successfully", pdb.Spec.PDBName) + + if cdb.Spec.DBServer != "" { + pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName + } else { + pdb.Status.ConnString = cdb.Spec.DBTnsurl + ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) + } + + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Created", "PDB 
'%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } + log.Info("New connect strinng", "tnsurl", cdb.Spec.DBTnsurl) + log.Info("Created PDB Resource", "PDB Name", pdb.Spec.PDBName) + r.getPDBState(ctx, req, pdb) + return nil +} + +/* +************************************************ + - Clone a PDB + +*********************************************** +*/ +func (r *PDBReconciler) clonePDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + + if pdb.Spec.PDBName == pdb.Spec.SrcPDBName { + return nil + } + + log := r.Log.WithValues("clonePDB", req.NamespacedName) + + var err error + + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return err + } + + /* Prevent cloning an existing pdb */ + err = r.getPDBState(ctx, req, pdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Check PDB not existence completed", "PDB Name", pdb.Spec.PDBName) + } + + } else { + log.Info("Database already exists ", "PDB Name", pdb.Spec.PDBName) + return nil + } + + values := map[string]string{ + "method": "CLONE", + "clonePDBName": pdb.Spec.PDBName, + "reuseTempFile": strconv.FormatBool(*(pdb.Spec.ReuseTempFile)), + "unlimitedStorage": strconv.FormatBool(*(pdb.Spec.UnlimitedStorage)), + "getScript": strconv.FormatBool(*(pdb.Spec.GetScript))} + + if pdb.Spec.SparseClonePath != "" { + values["sparseClonePath"] = pdb.Spec.SparseClonePath + } + if pdb.Spec.FileNameConversions != "" { + values["fileNameConversions"] = pdb.Spec.FileNameConversions + } + if pdb.Spec.TotalSize != "" { + values["totalSize"] = pdb.Spec.TotalSize + } + if pdb.Spec.TempSize != "" { + values["tempSize"] = pdb.Spec.TempSize + } + + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdb.Spec.SrcPDBName + "/" + + pdb.Status.Phase = pdbPhaseClone + pdb.Status.Msg = "Waiting for PDB to be cloned" + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") + if err != nil { + return err + } + + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Created", "PDB '%s' cloned successfully", pdb.Spec.PDBName) + + if cdb.Spec.DBServer != "" { + pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName + } else { + pdb.Status.ConnString = cdb.Spec.DBTnsurl + ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) + } + + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Clone", "PDB '%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } + + log.Info("Cloned PDB successfully", "Source PDB Name", pdb.Spec.SrcPDBName, "Clone PDB Name", pdb.Spec.PDBName) + r.getPDBState(ctx, req, pdb) + return nil +} + +/* +************************************************ + - Plug a PDB + +*********************************************** +*/ +func (r *PDBReconciler) plugPDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + + log := r.Log.WithValues("plugPDB", req.NamespacedName) + + var err error + var tdePassword string + var tdeSecret string + + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return err + } + + values := map[string]string{ + "method": "PLUG", + "xmlFileName": pdb.Spec.XMLFileName, + "pdb_name": pdb.Spec.PDBName, + //"adminName": pdbAdminName, + //"adminPwd": pdbAdminPwd, + "sourceFileNameConversions": pdb.Spec.SourceFileNameConversions, + "copyAction": pdb.Spec.CopyAction, + "fileNameConversions": pdb.Spec.FileNameConversions, + 
"unlimitedStorage": strconv.FormatBool(*(pdb.Spec.UnlimitedStorage)), + "reuseTempFile": strconv.FormatBool(*(pdb.Spec.ReuseTempFile)), + "totalSize": pdb.Spec.TotalSize, + "tempSize": pdb.Spec.TempSize, + "getScript": strconv.FormatBool(*(pdb.Spec.GetScript))} + + if *(pdb.Spec.TDEImport) { + tdePassword, err = r.getSecret(ctx, req, pdb, pdb.Spec.TDEPassword.Secret.SecretName, pdb.Spec.TDEPassword.Secret.Key) + if err != nil { + return err + } + tdeSecret, err = r.getSecret(ctx, req, pdb, pdb.Spec.TDESecret.Secret.SecretName, pdb.Spec.TDESecret.Secret.Key) + if err != nil { + return err + } + + tdeSecret = tdeSecret[:len(tdeSecret)-1] + tdePassword = tdeSecret[:len(tdePassword)-1] + values["tdePassword"] = tdePassword + values["tdeKeystorePath"] = pdb.Spec.TDEKeystorePath + values["tdeSecret"] = tdeSecret + values["tdeImport"] = strconv.FormatBool(*(pdb.Spec.TDEImport)) + } + if *(pdb.Spec.AsClone) { + values["asClone"] = strconv.FormatBool(*(pdb.Spec.AsClone)) + } + + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + + pdb.Status.TotalSize = pdb.Spec.TotalSize + pdb.Status.Phase = pdbPhasePlug + pdb.Status.Msg = "Waiting for PDB to be plugged" + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") + if err != nil { + return err + } + + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Created", "PDB '%s' plugged successfully", pdb.Spec.PDBName) + + if cdb.Spec.DBServer != "" { + pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName + } else { + pdb.Status.ConnString = cdb.Spec.DBTnsurl + ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) + } + + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Plugged", "PDB '%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } + + log.Info("Successfully plugged PDB", "PDB Name", pdb.Spec.PDBName) + r.getPDBState(ctx, req, pdb) + return nil +} + +/* +************************************************ + - Unplug a PDB + +*********************************************** +*/ +func (r *PDBReconciler) unplugPDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + + log := r.Log.WithValues("unplugPDB", req.NamespacedName) + + var err error + var tdePassword string + var tdeSecret string + + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return err + } + + values := map[string]string{ + "method": "UNPLUG", + "xmlFileName": pdb.Spec.XMLFileName, + "getScript": strconv.FormatBool(*(pdb.Spec.GetScript))} + + if *(pdb.Spec.TDEExport) { + // Get the TDE Password + tdePassword, err = r.getSecret(ctx, req, pdb, pdb.Spec.TDEPassword.Secret.SecretName, pdb.Spec.TDEPassword.Secret.Key) + if err != nil { + return err + } + 
tdeSecret, err = r.getSecret(ctx, req, pdb, pdb.Spec.TDESecret.Secret.SecretName, pdb.Spec.TDESecret.Secret.Key) + if err != nil { + return err + } + + tdeSecret = tdeSecret[:len(tdeSecret)-1] + tdePassword = tdeSecret[:len(tdePassword)-1] + values["tdePassword"] = tdePassword + values["tdeKeystorePath"] = pdb.Spec.TDEKeystorePath + values["tdeSecret"] = tdeSecret + values["tdeExport"] = strconv.FormatBool(*(pdb.Spec.TDEExport)) + } + + url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdb.Spec.PDBName + "/" + + log.Info("CallAPI(url)", "url", url) + + pdb.Status.Phase = pdbPhaseUnplug + pdb.Status.Msg = "Waiting for PDB to be unplugged" + + if cdb.Spec.DBServer != "" { + pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName + } else { + pdb.Status.ConnString = cdb.Spec.DBTnsurl + ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) + } + + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") + if err != nil { + return err + } + + if controllerutil.ContainsFinalizer(pdb, PDBFinalizer) { + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(pdb, PDBFinalizer) + err := r.Update(ctx, pdb) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + pdb.Status.Status = true + err = r.Delete(context.Background(), pdb, client.GracePeriodSeconds(1)) + if err != nil { + log.Info("Could not delete PDB resource", "err", err.Error()) + return err + } + } + + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Unplugged", "PDB '%s' unplugged successfully", pdb.Spec.PDBName) + + log.Info("Successfully unplugged PDB resource") + return nil +} + +/* +************************************************ + - Modify a PDB state + 
/*********************************************** +*/ +func (r *PDBReconciler) modifyPDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + + log := r.Log.WithValues("modifyPDB", req.NamespacedName) + + var err error + + err = r.getPDBState(ctx, req, pdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Warning PDB does not exist", "PDB Name", pdb.Spec.PDBName) + return nil + } + return err + } + + if pdb.Status.OpenMode == "READ WRITE" && pdb.Spec.PDBState == "OPEN" && pdb.Spec.ModifyOption == "READ WRITE" { + /* Database is already open no action required */ + return nil + } + + if pdb.Status.OpenMode == "MOUNTED" && pdb.Spec.PDBState == "CLOSE" && pdb.Spec.ModifyOption == "IMMEDIATE" { + /* Database is already close no action required */ + return nil + } + + // To prevent Reconcile from Modifying again whenever the Operator gets re-started + /* + modOption := pdb.Spec.PDBState + "-" + pdb.Spec.ModifyOption + if pdb.Status.ModifyOption == modOption { + return nil + } + */ + + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return err + } + + values := map[string]string{ + "state": pdb.Spec.PDBState, + "modifyOption": pdb.Spec.ModifyOption, + "getScript": strconv.FormatBool(*(pdb.Spec.GetScript))} + log.Info("MODIFY PDB", "pdb.Spec.PDBState=", pdb.Spec.PDBState, "pdb.Spec.ModifyOption=", pdb.Spec.ModifyOption) + log.Info("PDB STATUS OPENMODE", "pdb.Status.OpenMode=", pdb.Status.OpenMode) + + pdbName := pdb.Spec.PDBName + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" + + pdb.Status.Phase = pdbPhaseModify + pdb.Status.ModifyOption = pdb.Spec.PDBState + "-" + pdb.Spec.ModifyOption + pdb.Status.Msg = "Waiting for PDB to be modified" + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") + if err != nil { + return err + } + + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Modified", "PDB '%s' modified successfully", pdb.Spec.PDBName) + + if cdb.Spec.DBServer != "" { + pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName + } else { + pdb.Status.ConnString = cdb.Spec.DBTnsurl + } + + log.Info("Successfully modified PDB state", "PDB Name", pdb.Spec.PDBName) + r.getPDBState(ctx, req, pdb) + return nil +} + +/* +************************************************ + - Get PDB State + /*********************************************** +*/ +func (r *PDBReconciler) getPDBState(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + + log := r.Log.WithValues("getPDBState", req.NamespacedName) + + var err error + + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return err + } + + pdbName := pdb.Spec.PDBName + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" + + pdb.Status.Msg = "Getting PDB state" + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + + respData, err := NewCallApi(r, ctx, req, pdb, url, nil, "GET") + + if err != nil { + pdb.Status.OpenMode = "UNKNOWN" + pdb.Status.Msg = "CHECK PDB STATUS" + pdb.Status.Status = false + return err + } + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "Failed to get state of PDB :"+pdbName, "err", err.Error()) + } + + pdb.Status.OpenMode = objmap["open_mode"].(string) + + if pdb.Status.OpenMode == "READ WRITE" { + err := r.mapPDB(ctx, req, pdb) + if err != nil { + log.Info("Fail to Map resource getting PDB state") + } + } + + log.Info("Successfully obtained PDB state", "PDB Name", pdb.Spec.PDBName, "State", objmap["open_mode"].(string)) + return nil +} + +/* +************************************************ + - Map Database PDB to Kubernetes PDB CR + /*********************************************** +*/ +func (r *PDBReconciler) mapPDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + + log := r.Log.WithValues("mapPDB", req.NamespacedName) + + var err error + + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return err + } + + pdbName := pdb.Spec.PDBName + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" + + pdb.Status.Msg = "Mapping PDB" + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + + respData, err := NewCallApi(r, ctx, req, pdb, url, nil, "GET") + + if err != nil { + pdb.Status.OpenMode = "UNKNOWN" + return err + } + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "Failed to get state of PDB :"+pdbName, "err", err.Error()) + } + + totSizeInBytes := objmap["total_size"].(float64) + totSizeInGB := totSizeInBytes / 1024 / 1024 / 1024 + + pdb.Status.OpenMode = objmap["open_mode"].(string) + pdb.Status.TotalSize = fmt.Sprintf("%.2f", totSizeInGB) + "G" + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Mapped", "PDB '%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } + + if cdb.Spec.DBServer != "" { + pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName + } else { + pdb.Status.ConnString = cdb.Spec.DBTnsurl + ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) + } + + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + + log.Info("Successfully mapped PDB to Kubernetes resource", "PDB Name", pdb.Spec.PDBName) + return nil +} + +/* +************************************************ + - Delete a PDB + /*********************************************** +*/ +func (r *PDBReconciler) deletePDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + + log := r.Log.WithValues("deletePDB", req.NamespacedName) + + err := r.deletePDBInstance(req, ctx, pdb) + if err != nil { + log.Info("Could not delete PDB", "PDB Name", pdb.Spec.PDBName, 
"err", err.Error()) + return err + } + + if controllerutil.ContainsFinalizer(pdb, PDBFinalizer) { + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(pdb, PDBFinalizer) + err := r.Update(ctx, pdb) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + pdb.Status.Status = true + err = r.Delete(context.Background(), pdb, client.GracePeriodSeconds(1)) + if err != nil { + log.Info("Could not delete PDB resource", "err", err.Error()) + return err + } + } + + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Deleted", "PDB '%s' dropped successfully", pdb.Spec.PDBName) + + log.Info("Successfully deleted PDB resource") + return nil +} + +/************************************************* + - Check PDB deletion +**************************************************/ + +func (r *PDBReconciler) managePDBDeletion2(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + log := r.Log.WithValues("managePDBDeletion", req.NamespacedName) + if pdb.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(pdb, PDBFinalizer) { + controllerutil.AddFinalizer(pdb, PDBFinalizer) + if err := r.Update(ctx, pdb); err != nil { + return err + } + } + } else { + log.Info("Pdb marked to be delted") + if controllerutil.ContainsFinalizer(pdb, PDBFinalizer) { + if assertivePdbDeletion == true { + log.Info("Deleting pdb CRD: Assertive approach is turned on ") + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + log.Error(err, "Cannont find cdb resource ", "err", err.Error()) + return err + } + + var errclose error + pdbName := pdb.Spec.PDBName + if pdb.Status.OpenMode == "READ WRITE" { + valuesclose := map[string]string{ + "state": "CLOSE", + "modifyOption": "IMMEDIATE", + "getScript": "FALSE"} + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" + _, errclose = NewCallApi(r, ctx, req, pdb, url, valuesclose, "POST") + if errclose != nil { + log.Info("Warning error closing pdb continue anyway") + } + } + + if errclose == nil { + valuesdrop := map[string]string{ + "action": "INCLUDING", + "getScript": "FALSE"} + url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" + + log.Info("Call Delete()") + _, errdelete := NewCallApi(r, ctx, req, pdb, url, valuesdrop, "DELETE") + if errdelete != nil { + log.Error(errdelete, "Fail to delete pdb :"+pdb.Name, "err", errdelete.Error()) + return errdelete + } + } + + } /* END OF ASSERTIVE SECTION */ + + log.Info("Marked to be deleted") + pdb.Status.Phase = pdbPhaseDelete + pdb.Status.Status = true + r.Status().Update(ctx, pdb) + + controllerutil.RemoveFinalizer(pdb, PDBFinalizer) + if err := r.Update(ctx, pdb); err != nil { + log.Info("Cannot remove finalizer") + return err + } + + } + + return nil + } + + return nil +} + +/* +************************************************ + - Finalization logic for PDBFinalizer + /*********************************************** +*/ +func (r *PDBReconciler) deletePDBInstance(req ctrl.Request, ctx context.Context, pdb *dbapi.PDB) error { + + log := r.Log.WithValues("deletePDBInstance", req.NamespacedName) + + var err error + + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return err + } + + values := map[string]string{ + "action": "KEEP", + "getScript": strconv.FormatBool(*(pdb.Spec.GetScript))} + + if pdb.Spec.DropAction != "" { + values["action"] = pdb.Spec.DropAction + } + + pdbName := pdb.Spec.PDBName + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" + + pdb.Status.Phase = pdbPhaseDelete + pdb.Status.Msg = "Waiting for PDB to be deleted" + if err := r.Status().Update(ctx, pdb); err != nil { + log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) + } + _, err = NewCallApi(r, ctx, req, pdb, url, values, "DELETE") + if err != nil { + pdb.Status.ConnString = "" + return err + } + + log.Info("Successfully dropped PDB", "PDB Name", pdbName) + return nil +} + +/* +************************************************************* + - SetupWithManager sets up the controller with the Manager. + /************************************************************ +*/ +func (r *PDBReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.PDB{}). + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Ignore updates to CR status in which case metadata.Generation does not change + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + //return !e.DeleteStateUnknown + return false + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). 
+ Complete(r) +} + +/************************************************************* +Enh 35357707 - PROVIDE THE PDB TNSALIAS INFORMATION +**************************************************************/ + +func ParseTnsAlias(tns *string, pdbsrv *string) { + var swaptns string + fmt.Printf("Analyzing string [%s]\n", *tns) + fmt.Printf("Relacing srv [%s]\n", *pdbsrv) + + if strings.Contains(strings.ToUpper(*tns), "SERVICE_NAME") == false { + fmt.Print("Cannot generate tns alias for pdb") + return + } + + if strings.Contains(strings.ToUpper(*tns), "ORACLE_SID") == true { + fmt.Print("Cannot generate tns alias for pdb") + return + } + + swaptns = fmt.Sprintf("SERVICE_NAME=%s", *pdbsrv) + tnsreg := regexp.MustCompile(`SERVICE_NAME=\w+`) + *tns = tnsreg.ReplaceAllString(*tns, swaptns) + + fmt.Printf("Newstring [%s]\n", *tns) + +} + +func NewCallApi(intr interface{}, ctx context.Context, req ctrl.Request, pdb *dbapi.PDB, url string, payload map[string]string, action string) (string, error) { + + var c client.Client + var r logr.Logger + var e record.EventRecorder + var err error + + recpdb, ok1 := intr.(*PDBReconciler) + if ok1 { + fmt.Printf("func NewCallApi ((*PDBReconciler),......)\n") + c = recpdb.Client + e = recpdb.Recorder + r = recpdb.Log + } + + reccdb, ok2 := intr.(*CDBReconciler) + if ok2 { + fmt.Printf("func NewCallApi ((*CDBReconciler),......)\n") + c = reccdb.Client + e = reccdb.Recorder + r = reccdb.Log + } + + secret := &corev1.Secret{} + + log := r.WithValues("NewCallApi", req.NamespacedName) + log.Info("Call c.Get") + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsKey.Secret.SecretName, Namespace: pdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBTlsKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + rsaKeyPEM := secret.Data[pdb.Spec.PDBTlsKey.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: 
pdb.Spec.PDBTlsCrt.Secret.SecretName, Namespace: pdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBTlsCrt.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + rsaCertPEM := secret.Data[pdb.Spec.PDBTlsCrt.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsCat.Secret.SecretName, Namespace: pdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBTlsCat.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + caCert := secret.Data[pdb.Spec.PDBTlsCat.Secret.Key] + /* + r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(rsaKeyPEM)) + r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(rsaCertPEM)) + r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(caCert)) + */ + + certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM)) + if err != nil { + pdb.Status.Msg = "Error tls.X509KeyPair" + return "", err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, RootCAs: caCertPool} + + tr := &http.Transport{TLSClientConfig: tlsConf} + + httpclient := &http.Client{Transport: tr} + + log.Info("Issuing REST call", "URL", url, "Action", action) + + /* + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return "", err + } + */ + + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.WebServerUsr.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.WebServerUsr.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + webUserEnc := string(secret.Data[pdb.Spec.WebServerUsr.Secret.Key]) + webUserEnc = 
strings.TrimSpace(webUserEnc) + + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBPriKey.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBPriKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + privKey := string(secret.Data[pdb.Spec.PDBPriKey.Secret.Key]) + webUser, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserEnc, req) + + // Get Web Server User Password + secret = &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.WebServerPwd.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.WebServerPwd.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + webUserPwdEnc := string(secret.Data[pdb.Spec.WebServerPwd.Secret.Key]) + webUserPwdEnc = strings.TrimSpace(webUserPwdEnc) + webUserPwd, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserPwdEnc, req) + /////////////////////////////////////////////////////////////////////////////////// + + var httpreq *http.Request + if action == "GET" { + httpreq, err = http.NewRequest(action, url, nil) + } else { + jsonValue, _ := json.Marshal(payload) + httpreq, err = http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + } + + if err != nil { + log.Info("Unable to create HTTP Request for PDB : "+pdb.Name, "err", err.Error()) + return "", err + } + + httpreq.Header.Add("Accept", "application/json") + httpreq.Header.Add("Content-Type", "application/json") + httpreq.SetBasicAuth(webUser, webUserPwd) + + resp, err := httpclient.Do(httpreq) + if err != nil { + errmsg := err.Error() + log.Error(err, "Failed - Could not connect to ORDS Pod", "err", err.Error()) + pdb.Status.Msg = "Error: Could not connect to ORDS Pod" + e.Eventf(pdb, corev1.EventTypeWarning, "ORDSError", errmsg) + return 
"", err + } + + e.Eventf(pdb, corev1.EventTypeWarning, "Done", pdb.Spec.CDBResName) + if resp.StatusCode != http.StatusOK { + bb, _ := ioutil.ReadAll(resp.Body) + + if resp.StatusCode == 404 { + pdb.Status.ConnString = "" + pdb.Status.Msg = pdb.Spec.PDBName + " not found" + + } else { + if floodcontrol == false { + pdb.Status.Msg = "ORDS Error - HTTP Status Code:" + strconv.Itoa(resp.StatusCode) + } + } + + if floodcontrol == false { + log.Info("ORDS Error - HTTP Status Code :"+strconv.Itoa(resp.StatusCode), "Err", string(bb)) + } + + var apiErr ORDSError + json.Unmarshal([]byte(bb), &apiErr) + if floodcontrol == false { + e.Eventf(pdb, corev1.EventTypeWarning, "ORDSError", "Failed: %s", apiErr.Message) + } + //fmt.Printf("%+v", apiErr) + //fmt.Println(string(bb)) + floodcontrol = true + return "", errors.New("ORDS Error") + } + floodcontrol = false + + defer resp.Body.Close() + + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + fmt.Print(err.Error()) + } + respData := string(bodyBytes) + //fmt.Println(string(bodyBytes)) + + var apiResponse RESTSQLCollection + json.Unmarshal([]byte(bodyBytes), &apiResponse) + //fmt.Printf("%#v", apiResponse) + //fmt.Printf("%+v", apiResponse) + + errFound := false + for _, sqlItem := range apiResponse.Items { + if sqlItem.ErrorDetails != "" { + log.Info("ORDS Error - Oracle Error Code :" + strconv.Itoa(sqlItem.ErrorCode)) + if !errFound { + pdb.Status.Msg = sqlItem.ErrorDetails + } + e.Eventf(pdb, corev1.EventTypeWarning, "OraError", "%s", sqlItem.ErrorDetails) + errFound = true + } + } + + if errFound { + return "", errors.New("Oracle Error") + } + + return respData, nil +} diff --git a/controllers/database/shardingdatabase_controller.go b/controllers/database/shardingdatabase_controller.go index 73575df7..1ec77253 100644 --- a/controllers/database/shardingdatabase_controller.go +++ b/controllers/database/shardingdatabase_controller.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. 
+** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -44,11 +44,12 @@ import ( "fmt" "reflect" "strconv" + "strings" "time" "github.com/go-logr/logr" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/ons" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/ons" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -66,18 +67,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" shardingv1 "github.com/oracle/oracle-database-operator/commons/sharding" ) -//Sharding Topology -type ShardingTopology struct { - topicid string - Instance *databasev1alpha1.ShardingDatabase - deltopology bool - onsProvider common.ConfigurationProvider - onsProviderFlag bool - rclient ons.NotificationDataPlaneClient +// Struct keeping Oracle Notification Server Info +type OnsStatus struct { + Topicid string `json:"topicid,omitempty"` + Instance *databasev4.ShardingDatabase `json:"instance,omitempty"` + OnsProvider common.ConfigurationProvider `json:"onsProvider,omitempty"` + OnsProviderFlag bool `json:"onsProviderFlag,omitempty"` + Rclient ons.NotificationDataPlaneClient `json:"rclient,omitempty"` } // ShardingDatabaseReconciler reconciles a ShardingDatabase object @@ -88,13 +88,19 @@ type ShardingDatabaseReconciler struct { kubeClient kubernetes.Interface kubeConfig clientcmd.ClientConfig Recorder record.EventRecorder - osh []*ShardingTopology + InCluster bool + Namespace string } +var sentFailMsg = make(map[string]bool) +var sentCompleteMsg = make(map[string]bool) + +var oshMap = make(map[string]*OnsStatus) + // 
+kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases/status,verbs=get;update;patch // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods;pods/log;pods/exec;secrets;services;events;nodes;configmaps;persistentvolumeclaims;namespaces,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods;pods/log;pods/exec;secrets;containers;services;events;configmaps;persistentvolumeclaims;namespaces,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resources=pods/exec,verbs=create // +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups='',resources=statefulsets/finalizers,verbs=get;list;watch;create;update;patch;delete @@ -114,10 +120,10 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // your logic here var i int32 - //var ShardImageLatest []databasev1alpha1.ShardSpec - var OraCatalogSpex databasev1alpha1.CatalogSpec - var OraShardSpex databasev1alpha1.ShardSpec - var OraGsmSpex databasev1alpha1.GsmSpec + //var ShardImageLatest []databasev4.ShardSpec + var OraCatalogSpex databasev4.CatalogSpec + var OraShardSpex databasev4.ShardSpec + var OraGsmSpex databasev4.GsmSpec var result ctrl.Result var isShardTopologyDeleteTrue bool = false //var msg string @@ -126,6 +132,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req resultNq := ctrl.Result{Requeue: false} resultQ := ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second} var nilErr error = nil + var msg string // On every reconcile, we will call setCrdLifeCycleState // To understand this, please refer 
https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/ @@ -139,7 +146,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req } } // Fetch the ProvShard instance - instance := &databasev1alpha1.ShardingDatabase{} + instance := &databasev4.ShardingDatabase{} err = r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { if errors.IsNotFound(err) { @@ -152,16 +159,13 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, err } - idx, instFlag := r.checkProvInstance(instance) - // assinging osh instance + instFlag := r.checkProvInstance(instance) if !instFlag { - // Sharding Topolgy Struct Assignment - // ====================================== - osh := &ShardingTopology{} - osh.Instance = instance - r.osh = append(r.osh, osh) + oshMap[instance.Name] = &OnsStatus{} + oshMap[instance.Name].Instance = instance } defer r.setCrdLifeCycleState(instance, &result, &err, &stateType) + defer r.updateShardTopologyStatus(instance) // =============================== Check Deletion TimeStamp======== // Check if the ProvOShard instance is marked to be deleted, which is // // indicated by the deletion timestamp being set. 
@@ -179,30 +183,20 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req } // ======== Setting the flag and Index to be used later in this function ======== - idx, instFlag = r.checkProvInstance(instance) - if !instFlag { - //r.setCrdLifeCycleState(instance, &result, &err, stateType) - result = resultNq - return result, fmt.Errorf("DId not fid the instance in checkProvInstance") - } + // instFlag = r.checkProvInstance(instance) + // if !instFlag { + //r.setCrdLifeCycleState(instance, &result, &err, stateType) + //// result = resultNq + // return result, fmt.Errorf("DId not find the instance in checkProvInstance") + // } // ================================ OCI Notification Provider =========== - r.getOnsConfigProvider(instance, idx) + r.getOnsConfigProvider(instance) // =============================== Checking Namespace ============== - if instance.Spec.Namespace != "" { - err = shardingv1.AddNamespace(instance, r.Client, r.Log) - if err != nil { - //r.setCrdLifeCycleState(instance, &result, &err, stateType) - result = resultNq - return result, err - } - } else { - instance.Spec.Namespace = "default" - } // ======================== Validate Specs ============== - err = r.validateSpex(instance, idx) + err = r.validateSpex(instance) if err != nil { //r.setCrdLifeCycleState(instance, &result, &err, stateType) result = resultNq @@ -231,6 +225,12 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req if len(instance.Spec.Catalog) > 0 { for i = 0; i < int32(len(instance.Spec.Catalog)); i++ { OraCatalogSpex = instance.Spec.Catalog[i] + if len(OraCatalogSpex.Name) > 9 { + msg = "Catalog Name cannot be greater than 9 characters." 
+ err = fmt.Errorf(msg) + result = resultNq + return result, err + } // See if StatefulSets already exists and create if it doesn't result, err = r.deployStatefulSet(instance, shardingv1.BuildStatefulSetForCatalog(instance, OraCatalogSpex), "CATALOG") if err != nil { @@ -282,7 +282,13 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // if user set replicasize greater than 1 but also set instance.Spec.OraDbPvcName then only one service will be created and one pod for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] - if OraShardSpex.IsDelete != true { + if len(OraShardSpex.Name) > 9 { + msg = "Shard Name cannot be greater than 9 characters." + err = fmt.Errorf(msg) + result = resultNq + return result, err + } + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { result, err = r.createService(instance, shardingv1.BuildServiceDefForShard(instance, 0, OraShardSpex, "local")) if err != nil { result = resultNq @@ -302,7 +308,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { result, err = r.deployStatefulSet(instance, shardingv1.BuildStatefulSetForShard(instance, OraShardSpex), "SHARD") if err != nil { result = resultNq @@ -323,11 +329,18 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req return result, err } + err = r.checkShardState(instance) + if err != nil { + err = nilErr + result = resultQ + return result, err + } + //set the Waiting state for Reconcile loop // Loop will be requeued only if Shard Statefulset is not ready or not configured. 
// Till that time Reconcilation loop will remain in blocked state // if the err is return because of Shard is not ready then blocked state is rmeoved and reconcilation state is set - err = r.addPrimaryShards(instance, idx) + err = r.addPrimaryShards(instance) if err != nil { // time.Sleep(30 * time.Second) err = nilErr @@ -338,7 +351,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // Loop will be requeued only if Standby Shard Statefulset is not ready or not configured. // Till that time Reconcilation loop will remain in blocked state // if the err is return because of Shard is not ready then blocked state is rmeoved and reconcilation state is - err = r.addStandbyShards(instance, idx) + err = r.addStandbyShards(instance) if err != nil { // time.Sleep(30 * time.Second) err = nilErr @@ -348,7 +361,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // we don't need to run the requeue loop but still putting this condition to address any unkown situation // delShard function set the state to blocked and we do not allow any other operationn while delete is going on - err = r.delGsmShard(instance, idx) + err = r.delGsmShard(instance) if err != nil { // time.Sleep(30 * time.Second) err = nilErr @@ -361,13 +374,13 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req OraCatalogSpex = instance.Spec.Catalog[i] sfSet, catalogPod, err := r.validateInvidualCatalog(instance, OraCatalogSpex, int(i)) if err != nil { - shardingv1.LogMessages("INFO", "Catalog "+sfSet.Name+" is not in available state.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Catalog "+sfSet.Name+" is not in available state.", nil, instance, r.Log) result = resultNq return result, err } result, err = shardingv1.UpdateProvForCatalog(instance, OraCatalogSpex, r.Client, sfSet, catalogPod, r.Log) if err != nil { - shardingv1.LogMessages("INFO", "Error Occurred during catalog update operation.", nil, 
instance, r.Log) + shardingv1.LogMessages("Error", "Error Occurred during catalog update operation.", nil, instance, r.Log) result = resultNq return result, err } @@ -376,16 +389,16 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // ====================== Update Setup for Shard ============================== for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { sfSet, shardPod, err := r.validateShard(instance, OraShardSpex, int(i)) if err != nil { - shardingv1.LogMessages("INFO", "Shard "+sfSet.Name+" is not in available state.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Shard "+sfSet.Name+" is not in available state.", nil, instance, r.Log) result = resultNq return result, err } result, err = shardingv1.UpdateProvForShard(instance, OraShardSpex, r.Client, sfSet, shardPod, r.Log) if err != nil { - shardingv1.LogMessages("INFO", "Error Occurred during shard update operation..", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Error Occurred during shard update operation..", nil, instance, r.Log) result = resultNq return result, err } @@ -397,31 +410,19 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req OraGsmSpex = instance.Spec.Gsm[i] sfSet, gsmPod, err := r.validateInvidualGsm(instance, OraGsmSpex, int(i)) if err != nil { - shardingv1.LogMessages("INFO", "Gsm "+sfSet.Name+" is not in available state.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Gsm "+sfSet.Name+" is not in available state.", nil, instance, r.Log) result = resultNq return result, err } result, err = shardingv1.UpdateProvForGsm(instance, OraGsmSpex, r.Client, sfSet, gsmPod, r.Log) if err != nil { - shardingv1.LogMessages("INFO", "Error Occurred during GSM update operation.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Error Occurred 
during GSM update operation.", nil, instance, r.Log) result = resultNq return result, err } } - // Calling updateShardTopology to update the entire sharding topology - // This is required because we just executed updateShard,updateCatalog and UpdateGsm - // If some state has changed it will update the topology - - err = r.updateShardTopologyStatus(instance) - if err != nil { - // time.Sleep(30 * time.Second) - result = resultQ - err = nilErr - return result, err - } - - stateType = string(databasev1alpha1.CrdReconcileCompeleteState) + stateType = string(databasev4.CrdReconcileCompeleteState) // r.setCrdLifeCycleState(instance, &result, &err, stateType) // Set error to ni to avoid reconcilation state reconcilation error as we are passing err to setCrdLifeCycleState @@ -436,10 +437,11 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // Check https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#Options to under MaxConcurrentReconciles func (r *ShardingDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&databasev1alpha1.ShardingDatabase{}). + For(&databasev4.ShardingDatabase{}). Owns(&appsv1.StatefulSet{}). Owns(&corev1.Service{}). Owns(&corev1.Pod{}). + Owns(&corev1.Secret{}). WithEventFilter(r.eventFilterPredicate()). WithOptions(controller.Options{MaxConcurrentReconciles: 50}). //MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. 
Defaults to 1 Complete(r) @@ -453,85 +455,131 @@ func (r *ShardingDatabaseReconciler) eventFilterPredicate() predicate.Predicate return true }, UpdateFunc: func(e event.UpdateEvent) bool { + instance := &databasev4.ShardingDatabase{} + if old, ok := e.ObjectOld.(*corev1.Secret); ok { + if new, ok := e.ObjectNew.(*corev1.Secret); ok { + oshInst := instance + if (new.Name == oshInst.Spec.DbSecret.Name) && (new.Name == old.Name) { + _, ok := old.Data[oshInst.Spec.DbSecret.PwdFileName] + if ok { + if !reflect.DeepEqual(old.Data[oshInst.Spec.DbSecret.PwdFileName], new.Data[oshInst.Spec.DbSecret.PwdFileName]) { + shardingv1.LogMessages("INFO", "Secret Changed", nil, oshInst, r.Log) + } + } + shardingv1.LogMessages("INFO", "Secret update block", nil, oshInst, r.Log) + } + } + } return true }, DeleteFunc: func(e event.DeleteEvent) bool { + instance := &databasev4.ShardingDatabase{} _, podOk := e.Object.GetLabels()["statefulset.kubernetes.io/pod-name"] - for i := 0; i < len(r.osh); i++ { - if r.osh[i] != nil { - oshInst := r.osh[i] - if oshInst.deltopology == true { - break + if oshMap[instance.Name] != nil { + oshInst := instance + if instance.DeletionTimestamp == nil { - } - if e.Object.GetLabels()[string(databasev1alpha1.ShardingDelLabelKey)] == string(databasev1alpha1.ShardingDelLabelTrueValue) { - break + if e.Object.GetLabels()[string(databasev4.ShardingDelLabelKey)] == string(databasev4.ShardingDelLabelTrueValue) { } if podOk { delObj := e.Object.(*corev1.Pod) - if e.Object.GetLabels()["type"] == "Shard" && e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Instance.Name { + if e.Object.GetLabels()["type"] == "Shard" && e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Name { if delObj.DeletionTimestamp != nil { - go r.gsmInvitedNodeOp(oshInst.Instance, delObj.Name) + go r.gsmInvitedNodeOp(oshInst, delObj.Name) } } - if e.Object.GetLabels()["type"] == "Catalog" && 
e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Instance.Name { + if e.Object.GetLabels()["type"] == "Catalog" && e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Name { if delObj.DeletionTimestamp != nil { - go r.gsmInvitedNodeOp(oshInst.Instance, delObj.Name) + go r.gsmInvitedNodeOp(oshInst, delObj.Name) } } - } - } } - return true }, } } +// ================ Function to check secret update============= +func (r *ShardingDatabaseReconciler) UpdateSecret(instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger) (ctrl.Result, error) { + + sc := &corev1.Secret{} + //var err error + + // Reading a Secret + var err error = kClient.Get(context.TODO(), types.NamespacedName{ + Name: instance.Spec.DbSecret.Name, + Namespace: instance.Namespace, + }, sc) + + if err != nil { + return ctrl.Result{}, nil + } + + return ctrl.Result{}, nil +} + // ================== Function to get the Notification controller ============== -func (r *ShardingDatabaseReconciler) getOnsConfigProvider(instance *databasev1alpha1.ShardingDatabase, idx int, -) { +func (r *ShardingDatabaseReconciler) getOnsConfigProvider(instance *databasev4.ShardingDatabase) { var err error - if instance.Spec.NsConfigMap != "" && instance.Spec.NsSecret != "" && r.osh[idx].onsProviderFlag != true { - cmName := instance.Spec.NsConfigMap - secName := instance.Spec.NsSecret + if instance.Spec.DbSecret.NsConfigMap != "" && instance.Spec.DbSecret.NsSecret != "" && oshMap[instance.Name].OnsProviderFlag != true { + cmName := instance.Spec.DbSecret.NsConfigMap + secName := instance.Spec.DbSecret.NsSecret shardingv1.LogMessages("DEBUG", "Received parameters are "+shardingv1.GetFmtStr(cmName)+","+shardingv1.GetFmtStr(secName), nil, instance, r.Log) region, user, tenancy, passphrase, fingerprint, topicid := shardingv1.ReadConfigMap(cmName, instance, r.Client, r.Log) privatekey := shardingv1.ReadSecret(secName, 
instance, r.Client, r.Log) - r.osh[idx].topicid = topicid - r.osh[idx].onsProvider = common.NewRawConfigurationProvider(tenancy, user, region, fingerprint, privatekey, &passphrase) - r.osh[idx].rclient, err = ons.NewNotificationDataPlaneClientWithConfigurationProvider(r.osh[idx].onsProvider) + + oshMap[instance.Name].Topicid = topicid + oshMap[instance.Name].OnsProvider = common.NewRawConfigurationProvider(tenancy, user, region, fingerprint, privatekey, &passphrase) + //VV instance.Spec.TopicId = topicid + oshMap[instance.Name].Rclient, err = ons.NewNotificationDataPlaneClientWithConfigurationProvider(oshMap[instance.Name].OnsProvider) if err != nil { msg := "Error occurred in getting the OCI notification service based client." - r.osh[idx].onsProviderFlag = false + oshMap[instance.Name].OnsProviderFlag = false r.Log.Error(err, msg) shardingv1.LogMessages("Error", msg, nil, instance, r.Log) } else { - r.osh[idx].onsProviderFlag = true + oshMap[instance.Name].OnsProviderFlag = true } + } +} +func (r ShardingDatabaseReconciler) marshalOnsInfo(instance *databasev4.ShardingDatabase) (OnsStatus, error) { + onsData := OnsStatus{} + specBytes, err := instance.GetLastSuccessfulOnsInfo() + if err != nil { + shardingv1.LogMessages("Error", "error occurred while getting the data from getLastSuccessfulOnsInfo", nil, instance, r.Log) + return onsData, err + } else { + shardingv1.LogMessages("Error", "error occurred while getting the data from getLastSuccessfulOnsInfo and unmarshaling the object", nil, instance, r.Log) + err := json.Unmarshal(specBytes, &onsData) + if err != nil { + return onsData, err + } } + return onsData, nil } // ================== Function the Message ============== -func (r *ShardingDatabaseReconciler) sendMessage(instance *databasev1alpha1.ShardingDatabase, title string, body string) { - idx, instFlag := r.checkProvInstance(instance) +func (r *ShardingDatabaseReconciler) sendMessage(instance *databasev4.ShardingDatabase, title string, body string) { + 
instFlag := r.checkProvInstance(instance) if instFlag { - if r.osh[idx].onsProviderFlag { - shardingv1.SendNotification(title, body, instance, r.osh[idx].topicid, r.osh[idx].rclient, r.Log) + shardingv1.LogMessages("INFO", "sendMessage():instFlag true", nil, instance, r.Log) + if oshMap[instance.Name].OnsProviderFlag { + shardingv1.LogMessages("INFO", "sendMessage():OnsProviderFlag true", nil, instance, r.Log) + shardingv1.SendNotification(title, body, instance, oshMap[instance.Name].Topicid, oshMap[instance.Name].Rclient, r.Log) } } } -func (r *ShardingDatabaseReconciler) publishEvents(instance *databasev1alpha1.ShardingDatabase, eventMsg string, state string) { +func (r *ShardingDatabaseReconciler) publishEvents(instance *databasev4.ShardingDatabase, eventMsg string, state string) { - if state == string(databasev1alpha1.AvailableState) || state == string(databasev1alpha1.AddingShardState) || state == string(databasev1alpha1.ShardOnlineState) || state == string(databasev1alpha1.ProvisionState) || state == string(databasev1alpha1.DeletingState) || state == string(databasev1alpha1.Terminated) { + if state == string(databasev4.AvailableState) || state == string(databasev4.AddingShardState) || state == string(databasev4.ShardOnlineState) || state == string(databasev4.ProvisionState) || state == string(databasev4.DeletingState) || state == string(databasev4.Terminated) { r.Recorder.Eventf(instance, corev1.EventTypeNormal, "State Change", eventMsg) } else { r.Recorder.Eventf(instance, corev1.EventTypeWarning, "State Change", eventMsg) @@ -541,7 +589,7 @@ func (r *ShardingDatabaseReconciler) publishEvents(instance *databasev1alpha1.Sh } // ================== Function to check insytance deletion timestamp and activate the finalizer code ======== -func (r *ShardingDatabaseReconciler) finalizerShardingDatabaseInstance(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) finalizerShardingDatabaseInstance(instance *databasev4.ShardingDatabase, ) 
(error, bool) { isProvOShardToBeDeleted := instance.GetDeletionTimestamp() != nil @@ -564,7 +612,7 @@ func (r *ShardingDatabaseReconciler) finalizerShardingDatabaseInstance(instance } // Send true because delete is in progress and it is a custom delete message // We don't need to print custom err stack as we are deleting the topology - return fmt.Errorf("Delete of the sharding topology is in progress"), true + return fmt.Errorf("delete of the sharding topology is in progress"), true } // Add finalizer for this CR @@ -580,8 +628,8 @@ func (r *ShardingDatabaseReconciler) finalizerShardingDatabaseInstance(instance } // ========================== FInalizer Section =================== -func (r *ShardingDatabaseReconciler) addFinalizer(instance *databasev1alpha1.ShardingDatabase) error { - reqLogger := r.Log.WithValues("instance.Spec.Namespace", instance.Spec.Namespace, "instance.Name", instance.Name) +func (r *ShardingDatabaseReconciler) addFinalizer(instance *databasev4.ShardingDatabase) error { + reqLogger := r.Log.WithValues("instance.Namespace", instance.Namespace, "instance.Name", instance.Name) controllerutil.AddFinalizer(instance, shardingv1.ShardingDatabaseFinalizer) // Update CR @@ -593,7 +641,7 @@ func (r *ShardingDatabaseReconciler) addFinalizer(instance *databasev1alpha1.Sha return nil } -func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *databasev1alpha1.ShardingDatabase) error { +func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *databasev4.ShardingDatabase) error { // TODO(user): Add the cleanup steps that the operator needs to do before the CR // can be deleted. Examples of finalizers include performing backups and deleting // resources that are not owned by this CR, like a PVC. 
@@ -602,10 +650,9 @@ func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *database var err error var pvcName string - idx, _ := r.checkProvInstance(instance) + r.checkProvInstance(instance) sfSetFound := &appsv1.StatefulSet{} svcFound := &corev1.Service{} - r.osh[idx].deltopology = true if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] @@ -792,44 +839,33 @@ func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *database } } - r.osh[idx].deltopology = false - //r.osh[idx].addSem.Release(1) - //r.osh[idx].delSem.Release(1) - //instance1 := &shardingv1alpha1.ProvShard{} - r.osh[idx].Instance = &databasev1alpha1.ShardingDatabase{} - - //r.osh[idx] = nil + oshMap[instance.Name].Instance = &databasev4.ShardingDatabase{} return nil } -//============== - // Get the current instance -func (r *ShardingDatabaseReconciler) checkProvInstance(instance *databasev1alpha1.ShardingDatabase, -) (int, bool) { +func (r *ShardingDatabaseReconciler) checkProvInstance(instance *databasev4.ShardingDatabase, +) bool { var status bool = false - var idx int - for i := 0; i < len(r.osh); i++ { - idx = i - if r.osh[i] != nil { - if !r.osh[i].deltopology { - if r.osh[i].Instance.Name == instance.Name { - status = true - break - } - } + if oshMap[instance.Name] != nil { + title := "checkProvInstance()" + message := "oshMap.Instance.Name=[" + oshMap[instance.Name].Instance.Name + "]. instance.Name=[" + instance.Name + "]." 
+ shardingv1.LogMessages("INFO", title+":"+message, nil, instance, r.Log) + if oshMap[instance.Name].Instance.Name == instance.Name { + status = true } } - return idx, status + return status } // =========== validate Specs ============ -func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev4.ShardingDatabase) error { var eventMsg string var eventErr string = "Spec Error" + var i int32 lastSuccSpec, err := instance.GetLastSuccessfulSpec() if err != nil { @@ -840,6 +876,27 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha if lastSuccSpec == nil { // Logic to check if inital Spec is good or not + err = r.checkShardingType(instance) + if err != nil { + return err + } + + if len(instance.Spec.Shard) > 0 { + for i = 0; i < int32(len(instance.Spec.Shard)); i++ { + OraShardSpex := instance.Spec.Shard[i] + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { + err = r.checkShardSpace(instance, OraShardSpex) + if err != nil { + return err + } + err = r.checkShardGroup(instance, OraShardSpex) + if err != nil { + return err + } + } + } + } + // Once the initial Spec is been validated then update the last Sucessful Spec err = instance.UpdateLastSuccessfulSpec(r.Client) if err != nil { @@ -848,11 +905,6 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha } else { // if the last sucessful spec is not nil // check the parameters which cannot be changed - if lastSuccSpec.Namespace != instance.Spec.Namespace { - eventMsg = "ShardingDatabase CRD resource " + shardingv1.GetFmtStr(instance.Name) + " namespace changed from " + shardingv1.GetFmtStr(lastSuccSpec.Namespace) + " to " + shardingv1.GetFmtStr(instance.Spec.Namespace) + ". This change is not allowed." 
- r.Recorder.Eventf(instance, corev1.EventTypeWarning, eventErr, eventMsg) - return fmt.Errorf("instance spec has changed and namespace change is not supported") - } if lastSuccSpec.DbImage != instance.Spec.DbImage { eventMsg = "ShardingDatabase CRD resource " + shardingv1.GetFmtStr(instance.Name) + " DBImage changed from " + shardingv1.GetFmtStr(lastSuccSpec.DbImage) + " to " + shardingv1.GetFmtStr(instance.Spec.DbImage) + ". This change is not allowed." @@ -874,24 +926,83 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha // Compare Env variables for shard begins here if !r.comapreShardEnvVariables(instance, lastSuccSpec) { - return fmt.Errorf("Change of Shard env variables are not") + return fmt.Errorf("change of Shard env variables are not") } // Compare Env variables for catalog begins here if !r.comapreCatalogEnvVariables(instance, lastSuccSpec) { - return fmt.Errorf("Change of Catalog env variables are not") + return fmt.Errorf("change of Catalog env variables are not") } // Compare env variable for Catalog ends here if !r.comapreGsmEnvVariables(instance, lastSuccSpec) { - return fmt.Errorf("Change of GSM env variables are not") + return fmt.Errorf("change of GSM env variables are not") } } return nil } +func (r *ShardingDatabaseReconciler) checkShardingType(instance *databasev4.ShardingDatabase) error { + var i, k int32 + var regionFlag bool + + for k = 0; k < int32(len(instance.Spec.Gsm)); k++ { + regionFlag = false + for i = 0; i < int32(len(instance.Spec.Shard)); i++ { + if instance.Spec.Gsm[k].Region == instance.Spec.Shard[i].ShardRegion { + regionFlag = true + } + } + if !regionFlag { + msg := instance.Spec.Gsm[k].Region + " does not match with any region with Shard region. 
Region will be created during shard director provisioning" + shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + } + } + + return nil +} + +// Check the ShardGroups/ Shard Space and Shard group Name +// checkShrdGSR is Shardgroup/ShardSpace/ShardRegion + +func (r *ShardingDatabaseReconciler) checkShardSpace(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) error { + + if instance.Spec.ShardingType != "" { + // Check for the Sharding Type and if it is USER do following + if strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) == "USER" { + if len(OraShardSpex.ShardRegion) == 0 { + return fmt.Errorf("Shard region cannot be empty! ") + } + if len(OraShardSpex.ShardSpace) == 0 { + return fmt.Errorf("Shard Space in " + OraShardSpex.Name + " cannot be empty") + } + } + } + return nil +} + +// Check the ShardGroups/ Shard Space and Shard group Name +// checkShrdGSR is Shardgroup/ShardSpace/ShardRegion + +func (r *ShardingDatabaseReconciler) checkShardGroup(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) error { + + // We need to check Shard Region and Shard Group for ShardingType='SYSTEM' and 'NATIVE' + if strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) != "USER" { + if len(OraShardSpex.ShardRegion) == 0 { + return fmt.Errorf("Shard region cannot be empty! 
in " + OraShardSpex.Name) + } + if len(OraShardSpex.ShardGroup) == 0 { + return fmt.Errorf("Shard group in " + OraShardSpex.Name + " cannot be empty") + } + + // + + } + return nil +} + // Compare GSM Env Variables -func (r *ShardingDatabaseReconciler) comapreGsmEnvVariables(instance *databasev1alpha1.ShardingDatabase, lastSuccSpec *databasev1alpha1.ShardingDatabaseSpec) bool { +func (r *ShardingDatabaseReconciler) comapreGsmEnvVariables(instance *databasev4.ShardingDatabase, lastSuccSpec *databasev4.ShardingDatabaseSpec) bool { var eventMsg string var eventErr string = "Spec Error" var i, j int32 @@ -916,7 +1027,7 @@ func (r *ShardingDatabaseReconciler) comapreGsmEnvVariables(instance *databasev1 return true } -func (r *ShardingDatabaseReconciler) comapreCatalogEnvVariables(instance *databasev1alpha1.ShardingDatabase, lastSuccSpec *databasev1alpha1.ShardingDatabaseSpec) bool { +func (r *ShardingDatabaseReconciler) comapreCatalogEnvVariables(instance *databasev4.ShardingDatabase, lastSuccSpec *databasev4.ShardingDatabaseSpec) bool { var eventMsg string var eventErr string = "Spec Error" var i, j int32 @@ -941,7 +1052,7 @@ func (r *ShardingDatabaseReconciler) comapreCatalogEnvVariables(instance *databa return true } -func (r *ShardingDatabaseReconciler) comapreShardEnvVariables(instance *databasev1alpha1.ShardingDatabase, lastSuccSpec *databasev1alpha1.ShardingDatabaseSpec) bool { +func (r *ShardingDatabaseReconciler) comapreShardEnvVariables(instance *databasev4.ShardingDatabase, lastSuccSpec *databasev4.ShardingDatabaseSpec) bool { var eventMsg string var eventErr string = "Spec Error" var i, j int32 @@ -968,21 +1079,21 @@ func (r *ShardingDatabaseReconciler) comapreShardEnvVariables(instance *database //===== Set the CRD resource life cycle state ======== -func (r *ShardingDatabaseReconciler) setCrdLifeCycleState(instance *databasev1alpha1.ShardingDatabase, result *ctrl.Result, err *error, stateType *string) { +func (r *ShardingDatabaseReconciler) 
setCrdLifeCycleState(instance *databasev4.ShardingDatabase, result *ctrl.Result, err *error, stateType *string) { var metaCondition metav1.Condition var updateFlag = false if *stateType == "ReconcileWaiting" { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev1alpha1.CrdReconcileWaitingReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev4.CrdReconcileWaitingReason)) updateFlag = true } else if *stateType == "ReconcileComplete" { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev1alpha1.CrdReconcileCompleteReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev4.CrdReconcileCompleteReason)) updateFlag = true } else if result.Requeue { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev1alpha1.CrdReconcileQueuedState), string(databasev1alpha1.CrdReconcileQueuedReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev4.CrdReconcileQueuedState), string(databasev4.CrdReconcileQueuedReason)) updateFlag = true } else if *err != nil { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev1alpha1.CrdReconcileErrorState), string(databasev1alpha1.CrdReconcileErrorReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev4.CrdReconcileErrorState), string(databasev4.CrdReconcileErrorReason)) updateFlag = true } else { @@ -999,7 +1110,7 @@ func (r *ShardingDatabaseReconciler) setCrdLifeCycleState(instance *databasev1al } -func (r *ShardingDatabaseReconciler) validateGsmnCatalog(instance *databasev1alpha1.ShardingDatabase) error { +func (r *ShardingDatabaseReconciler) validateGsmnCatalog(instance *databasev4.ShardingDatabase) error { var err error _, _, err = r.validateCatalog(instance) if err != nil { @@ -1012,7 +1123,7 @@ func (r 
*ShardingDatabaseReconciler) validateGsmnCatalog(instance *databasev1alp return nil } -func (r *ShardingDatabaseReconciler) validateGsm(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) validateGsm(instance *databasev4.ShardingDatabase, ) (*appsv1.StatefulSet, *corev1.Pod, error) { //var err error var i int32 @@ -1028,6 +1139,7 @@ func (r *ShardingDatabaseReconciler) validateGsm(instance *databasev1alpha1.Shar if availableFlag != true { gsmSfSet = gsmSfSet1 gsmPod = gsmPod1 + // availableFlag = true availableFlag = true } } @@ -1039,7 +1151,7 @@ func (r *ShardingDatabaseReconciler) validateGsm(instance *databasev1alpha1.Shar return gsmSfSet, gsmPod, fmt.Errorf("GSM is not ready") } -func (r *ShardingDatabaseReconciler) validateInvidualGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec, specId int, +func (r *ShardingDatabaseReconciler) validateInvidualGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec, specId int, ) (*appsv1.StatefulSet, *corev1.Pod, error) { //var err error var i int32 @@ -1051,42 +1163,44 @@ func (r *ShardingDatabaseReconciler) validateInvidualGsm(instance *databasev1alp podList := &corev1.PodList{} var isPodExist bool + // VV : uninitialised variable 'i' being used. + i = int32(specId) gsmSfSet, err = shardingv1.CheckSfset(OraGsmSpex.Name, instance, r.Client) if err != nil { msg = "Unable to find GSM statefulset " + shardingv1.GetFmtStr(OraGsmSpex.Name) + "." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.StatefulSetNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.StatefulSetNotFound)) return gsmSfSet, gsmPod, err } podList, err = shardingv1.GetPodList(gsmSfSet.Name, "GSM", instance, r.Client) if err != nil { msg = "Unable to find any pod in statefulset " + shardingv1.GetFmtStr(gsmSfSet.Name) + "." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.PodNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.PodNotFound)) return gsmSfSet, gsmPod, err } isPodExist, gsmPod = shardingv1.PodListValidation(podList, gsmSfSet.Name, instance, r.Client) if !isPodExist { msg = "Unable to validate GSM " + shardingv1.GetFmtStr(gsmPod.Name) + " pod. GSM pod doesn't seems to be ready to accept the commands." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.PodNotReadyState)) - return gsmSfSet, gsmPod, fmt.Errorf("Pod doesn't exist") + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.PodNotReadyState)) + return gsmSfSet, gsmPod, fmt.Errorf("pod doesn't exist") } err = shardingv1.CheckGsmStatus(gsmPod.Name, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { msg = "Unable to validate GSM director. GSM director doesn't seems to be ready to accept the commands." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.ProvisionState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.ProvisionState)) return gsmSfSet, gsmPod, err } - r.updateGsmStatus(instance, specId, string(databasev1alpha1.AvailableState)) + r.updateGsmStatus(instance, specId, string(databasev4.AvailableState)) return gsmSfSet, gsmPod, nil } -func (r *ShardingDatabaseReconciler) validateCatalog(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) validateCatalog(instance *databasev4.ShardingDatabase, ) (*appsv1.StatefulSet, *corev1.Pod, error) { catalogSfSet := &appsv1.StatefulSet{} @@ -1101,6 +1215,7 @@ func (r *ShardingDatabaseReconciler) validateCatalog(instance *databasev1alpha1. if availlableFlag != true { catalogSfSet = catalogSfSet1 catalogPod = catalogPod1 + // availlableFlag = true availlableFlag = true } } @@ -1114,7 +1229,7 @@ func (r *ShardingDatabaseReconciler) validateCatalog(instance *databasev1alpha1. } // === Validate Individual Catalog -func (r *ShardingDatabaseReconciler) validateInvidualCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec, specId int, +func (r *ShardingDatabaseReconciler) validateInvidualCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec, specId int, ) (*appsv1.StatefulSet, *corev1.Pod, error) { var err error @@ -1126,40 +1241,40 @@ func (r *ShardingDatabaseReconciler) validateInvidualCatalog(instance *databasev catalogSfSet, err = shardingv1.CheckSfset(OraCatalogSpex.Name, instance, r.Client) if err != nil { msg := "Unable to find Catalog statefulset " + shardingv1.GetFmtStr(OraCatalogSpex.Name) + "." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.StatefulSetNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.StatefulSetNotFound)) return catalogSfSet, catalogPod, err } podList, err = shardingv1.GetPodList(catalogSfSet.Name, "CATALOG", instance, r.Client) if err != nil { msg := "Unable to find any pod in statefulset " + shardingv1.GetFmtStr(catalogSfSet.Name) + "." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.PodNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.PodNotFound)) return catalogSfSet, catalogPod, err } isPodExist, catalogPod = shardingv1.PodListValidation(podList, catalogSfSet.Name, instance, r.Client) if !isPodExist { msg := "Unable to validate Catalog " + shardingv1.GetFmtStr(catalogSfSet.Name) + " pod. Catalog pod doesn't seems to be ready to accept the commands." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.PodNotReadyState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.PodNotReadyState)) return catalogSfSet, catalogPod, fmt.Errorf("Pod doesn't exist") } err = shardingv1.ValidateDbSetup(catalogPod.Name, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { msg := "Unable to validate Catalog. Catalog doesn't seems to be ready to accept the commands." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.ProvisionState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.ProvisionState)) return catalogSfSet, catalogPod, err } - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.AvailableState)) + r.updateCatalogStatus(instance, specId, string(databasev4.AvailableState)) return catalogSfSet, catalogPod, nil } // ======= Function to validate Shard -func (r *ShardingDatabaseReconciler) validateShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec, specId int, +func (r *ShardingDatabaseReconciler) validateShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec, specId int, ) (*appsv1.StatefulSet, *corev1.Pod, error) { var err error @@ -1169,59 +1284,56 @@ func (r *ShardingDatabaseReconciler) validateShard(instance *databasev1alpha1.Sh shardSfSet, err = shardingv1.CheckSfset(OraShardSpex.Name, instance, r.Client) if err != nil { msg := "Unable to find Shard statefulset " + shardingv1.GetFmtStr(OraShardSpex.Name) + "." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.StatefulSetNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.StatefulSetNotFound)) return shardSfSet, shardPod, err } podList, err := shardingv1.GetPodList(shardSfSet.Name, "SHARD", instance, r.Client) if err != nil { msg := "Unable to find any pod in statefulset " + shardingv1.GetFmtStr(shardSfSet.Name) + "." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.PodNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.PodNotFound)) return shardSfSet, shardPod, err } isPodExist, shardPod := shardingv1.PodListValidation(podList, shardSfSet.Name, instance, r.Client) if !isPodExist { msg := "Unable to validate Shard " + shardingv1.GetFmtStr(shardPod.Name) + " pod. Shard pod doesn't seems to be ready to accept the commands." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.PodNotReadyState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.PodNotReadyState)) return shardSfSet, shardPod, err } err = shardingv1.ValidateDbSetup(shardPod.Name, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { msg := "Unable to validate shard. Shard doesn't seems to be ready to accept the commands." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.ProvisionState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.ProvisionState)) return shardSfSet, shardPod, err } - r.updateShardStatus(instance, specId, string(databasev1alpha1.AvailableState)) + r.updateShardStatus(instance, specId, string(databasev4.AvailableState)) return shardSfSet, shardPod, nil } // This function updates the shard topology over all -// -func (r *ShardingDatabaseReconciler) updateShardTopologyStatus(instance *databasev1alpha1.ShardingDatabase) error { +func (r *ShardingDatabaseReconciler) updateShardTopologyStatus(instance *databasev4.ShardingDatabase) { //shardPod := &corev1.Pod{} //gsmSfSet := &appsv1.StatefulSet{} gsmPod := &corev1.Pod{} var err error _, _, err = r.validateCatalog(instance) if err != nil { - return err + } _, gsmPod, err = r.validateGsm(instance) if err != nil { - return err + } r.updateShardTopologyShardsInGsm(instance, gsmPod) - return nil - } -func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *databasev1alpha1.ShardingDatabase, gsmPod *corev1.Pod) { +func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *databasev4.ShardingDatabase, gsmPod *corev1.Pod) { shardSfSet := &appsv1.StatefulSet{} //shardPod := &corev1.Pod{} //gsmSfSet := &appsv1.StatefulSet{} @@ -1230,13 +1342,16 @@ func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *da if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] + if strings.ToLower(OraShardSpex.IsDelete) == "failed" { + continue + } // stateStr := shardingv1.GetGsmShardStatus(instance, OraShardSpex.Name) - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { shardSfSet, _, err = 
r.validateShard(instance, OraShardSpex, int(i)) if err != nil { continue } else { - _ = r.verifyShards(instance, gsmPod, shardSfSet) + _ = r.verifyShards(instance, gsmPod, shardSfSet, OraShardSpex) } } @@ -1244,7 +1359,7 @@ func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *da } } -func (r *ShardingDatabaseReconciler) updateGsmStatus(instance *databasev1alpha1.ShardingDatabase, specIdx int, state string) { +func (r *ShardingDatabaseReconciler) updateGsmStatus(instance *databasev4.ShardingDatabase, specIdx int, state string) { var currState string var eventMsg string @@ -1274,7 +1389,7 @@ func (r *ShardingDatabaseReconciler) updateGsmStatus(instance *databasev1alpha1. } } -func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev1alpha1.ShardingDatabase, specIdx int, state string) { +func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev4.ShardingDatabase, specIdx int, state string) { var eventMsg string var currState string var eventMsgFlag = true @@ -1282,7 +1397,7 @@ func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev1alp name := instance.Spec.Catalog[specIdx].Name if len(instance.Status.Catalog) > 0 { - currState = shardingv1.GetGsmCatalogStatusKey(instance, name+"_"+string(databasev1alpha1.State)) + currState = shardingv1.GetGsmCatalogStatusKey(instance, name+"_"+string(databasev4.State)) if currState == state { eventMsgFlag = false } @@ -1301,14 +1416,14 @@ func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev1alp } } -func (r *ShardingDatabaseReconciler) updateShardStatus(instance *databasev1alpha1.ShardingDatabase, specIdx int, state string) { +func (r *ShardingDatabaseReconciler) updateShardStatus(instance *databasev4.ShardingDatabase, specIdx int, state string) { var eventMsg string var currState string var eventMsgFlag = true name := instance.Spec.Shard[specIdx].Name if len(instance.Status.Shard) > 0 { - currState = 
shardingv1.GetGsmShardStatusKey(instance, name+"_"+string(databasev1alpha1.State)) + currState = shardingv1.GetGsmShardStatusKey(instance, name+"_"+string(databasev4.State)) if currState == state { eventMsgFlag = false } @@ -1327,7 +1442,7 @@ func (r *ShardingDatabaseReconciler) updateShardStatus(instance *databasev1alpha } } -func (r *ShardingDatabaseReconciler) updateGsmShardStatus(instance *databasev1alpha1.ShardingDatabase, name string, state string) { +func (r *ShardingDatabaseReconciler) updateGsmShardStatus(instance *databasev4.ShardingDatabase, name string, state string) { var eventMsg string var currState string var eventMsgFlag = true @@ -1358,7 +1473,7 @@ func (r *ShardingDatabaseReconciler) updateGsmShardStatus(instance *databasev1al } // This function add the Primary Shards in GSM -func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev4.ShardingDatabase) error { //var result ctrl.Result var result ctrl.Result var i int32 @@ -1368,7 +1483,7 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 //gsmSfSet := &appsv1.StatefulSet{} gsmPod := &corev1.Pod{} var sparams1 string - var deployFlag = false + var deployFlag = true var errStr = false //var msg string @@ -1382,11 +1497,12 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] // stateStr := shardingv1.GetGsmShardStatus(instance, OraShardSpex.Name) - // !strings.Contains(stateStr, "DELETE") - if OraShardSpex.IsDelete != true { + // strings.Contains(stateStr, "DELETE") + + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { if setLifeCycleFlag != true { setLifeCycleFlag = true - stateType := string(databasev1alpha1.CrdReconcileWaitingState) + stateType := 
string(databasev4.CrdReconcileWaitingState) r.setCrdLifeCycleState(instance, &result, &err, &stateType) } // 1st Step is to check if Shard is in good state if not then just continue @@ -1394,52 +1510,97 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 shardSfSet, _, err = r.validateShard(instance, OraShardSpex, int(i)) if err != nil { errStr = true + deployFlag = false continue } // 2nd Step is to check if GSM is in good state if not then just return because you can't do anything _, gsmPod, err = r.validateGsm(instance) if err != nil { + deployFlag = false return err } // 3rd step to check if shard is in GSM if not then continue - sparams := shardingv1.BuildShardParams(shardSfSet) + sparams := shardingv1.BuildShardParams(instance, shardSfSet, OraShardSpex) sparams1 = sparams err = shardingv1.CheckShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err == nil { // if you are in this block then it means that shard already exist in the GSM and we do not need to anything continue } + + /** + // Copy file from pod to FS + configrest, kclientset, err := shardingv1.GetPodCopyConfig(r.kubeClient, r.kubeConfig, instance, r.Log) + if err != nil { + return fmt.Errorf("Error occurred in getting KubeConfig, cannot perform copy operation from the pod") + } + + _, _, err = shardingv1.ExecCommand(gsmPod.Name, shardingv1.GetTdeKeyLocCmd(), r.kubeClient, r.kubeConfig, instance, r.Log) + if err != nil { + fmt.Printf("Error occurred during the while getting the TDE key from the pod " + gsmPod.Name) + //return err + } + fileName := "/tmp/tde_key" + last := fileName[strings.LastIndex(fileName, "/")+1:] + fileName1 := last + fsLoc := shardingv1.TmpLoc + "/" + fileName1 + _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fmt.Sprintf("%s/%s:/%s", instance.Namespace, gsmPod.Name, fileName), fsLoc, "") + if err != nil { + fmt.Printf("failed to copy file") + //return err + 
} + + // Copying it to Shard Pod + _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fsLoc, fmt.Sprintf("%s/%s:/%s", instance.Namespace, OraShardSpex.Name+"-0", fsLoc), "") + if err != nil { + fmt.Printf("failed to copy file") + //return err + } + + **/ + // If the shard doesn't exist in GSM then just add the shard statefulset and update GSM shard status // ADD Shard in GSM - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.AddingShardState)) - err := shardingv1.AddShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) + + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.AddingShardState)) + err = shardingv1.AddShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.AddingShardErrorState)) - title = "Shard Addition Failure" - message = "Error occurred during shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " addition." - r.sendMessage(instance, title, message) - } else { - deployFlag = true + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.AddingShardErrorState)) + title = instance.Namespace + ":Shard Addition Failure" + message = "TopicId:" + oshMap[instance.Name].Topicid + ":Error occurred during shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " addition." + shardingv1.LogMessages("Error", title+":"+message, nil, instance, r.Log) + msgKey := instance.Namespace + "-" + OraShardSpex.Name + if sentFailMsg[msgKey] != true { + r.sendMessage(instance, title, message) + } + sentFailMsg[msgKey] = true + sentCompleteMsg[msgKey] = false + deployFlag = false } } } + if errStr == true { + shardingv1.LogMessages("INFO", "Some shards are still pending for addition. 
Requeue the reconcile loop.", nil, instance, r.Log) + return fmt.Errorf("shards are not ready for addition.") + } + // ======= Deploy Shard Logic ========= if deployFlag == true { _ = shardingv1.DeployShardInGsm(gsmPod.Name, sparams1, instance, r.kubeClient, r.kubeConfig, r.Log) r.updateShardTopologyShardsInGsm(instance, gsmPod) + } else { + shardingv1.LogMessages("INFO", "Shards are not added in GSM. Deploy operation will happen after shard addition. Requeue the reconcile loop.", nil, instance, r.Log) + return fmt.Errorf("shards addition are pending.") } } - if errStr == true { - shardingv1.LogMessages("INFO", "Some shards are still pending for addition. Requeue the reconcile loop.", nil, instance, r.Log) - return fmt.Errorf("Shard Addition is pending.") - } + shardingv1.LogMessages("INFO", "Completed the shard addition operation. For details, check the CRD resource status for GSM and Shards.", nil, instance, r.Log) return nil } // This function Check the online shard -func (r *ShardingDatabaseReconciler) verifyShards(instance *databasev1alpha1.ShardingDatabase, gsmPod *corev1.Pod, shardSfSet *appsv1.StatefulSet) error { +func (r *ShardingDatabaseReconciler) verifyShards(instance *databasev4.ShardingDatabase, gsmPod *corev1.Pod, shardSfSet *appsv1.StatefulSet, OraShardSpex databasev4.ShardSpec) error { //var result ctrl.Result //var i int32 var err error @@ -1447,34 +1608,43 @@ func (r *ShardingDatabaseReconciler) verifyShards(instance *databasev1alpha1.Sha var message string // ================================ Check Shards ================== //veryify shard make shard state online and it must be executed to check shard state after every CRUD operation - sparams := shardingv1.BuildShardParams(shardSfSet) + sparams := shardingv1.BuildShardParams(instance, shardSfSet, OraShardSpex) err = shardingv1.CheckOnlineShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { // If the shard doesn't exist in GSM then just delete the shard 
statefulset and update GSM shard status /// Terminate state means we will remove teh shard entry from GSM shard status - r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev1alpha1.ShardOnlineErrorState)) - shardingv1.CancelChunksInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) + r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev4.ShardOnlineErrorState)) + if strings.ToUpper(instance.Spec.ReplicationType) != "NATIVE" { + shardingv1.CancelChunksInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) + } return err } oldStateStr := shardingv1.GetGsmShardStatus(instance, shardSfSet.Name) - r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev1alpha1.ShardOnlineState)) + r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev4.ShardOnlineState)) // Following logic will sent a email only once - if oldStateStr != string(databasev1alpha1.ShardOnlineState) { - title = "Shard Addition Completed" - message = "Shard addition completed for shard " + shardingv1.GetFmtStr(shardSfSet.Name) + " in GSM." - r.sendMessage(instance, title, message) + if oldStateStr != string(databasev4.ShardOnlineState) { + title = instance.Namespace + ":Shard Addition Completed" + message = "TopicId:" + oshMap[instance.Name].Topicid + ":Shard addition completed for shard " + shardingv1.GetFmtStr(shardSfSet.Name) + " in GSM." 
+ shardingv1.LogMessages("INFO", title+":"+message, nil, instance, r.Log) + msgKey := instance.Namespace + "-" + shardSfSet.Name + if sentCompleteMsg[msgKey] != true { + r.sendMessage(instance, title, message) + } + + sentCompleteMsg[msgKey] = true + sentFailMsg[msgKey] = false } return nil } -func (r *ShardingDatabaseReconciler) addStandbyShards(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) addStandbyShards(instance *databasev4.ShardingDatabase) error { //var result ctrl.Result return nil } // ========== Delete Shard Section==================== -func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev4.ShardingDatabase) error { var result ctrl.Result var i int32 var err error @@ -1489,13 +1659,14 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar shardingv1.LogMessages("DEBUG", "Starting shard deletion operation.", nil, instance, r.Log) // ================================ Shard Delete Logic =================== + if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] - if OraShardSpex.IsDelete == true { + if shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { if setLifeCycleFlag != true { setLifeCycleFlag = true - stateType := string(databasev1alpha1.CrdReconcileWaitingState) + stateType := string(databasev4.CrdReconcileWaitingState) r.setCrdLifeCycleState(instance, &result, &err, &stateType) } // Step 1st to check if GSM is in good state if not then just return because you can't do anything @@ -1517,51 +1688,76 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar continue } // 3rd step to check if shard is in GSM if not then continue - sparams := shardingv1.BuildShardParams(shardSfSet) + sparams := shardingv1.BuildShardParams(instance, 
shardSfSet, OraShardSpex) err = shardingv1.CheckShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { // If the shard doesn't exist in GSM then just delete the shard statefulset and update GSM shard status /// Terminate state means we will remove teh shard entry from GSM shard status r.delShard(instance, shardSfSet.Name, shardSfSet, shardPod, int(i)) - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.Terminated)) - r.updateShardStatus(instance, int(i), string(databasev1alpha1.Terminated)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.Terminated)) + r.updateShardStatus(instance, int(i), string(databasev4.Terminated)) continue } // 4th step to check if shard is in GSM and shard is online if not then continue // CHeck before deletion if GSM is not ready set the Shard State to Delete Error - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.DeletingState)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.DeletingState)) err = shardingv1.CheckOnlineShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { // If the shard doesn't exist in GSM then just delete the shard statefulset and update GSM shard status /// Terminate state means we will remove teh shard entry from GSM shard status - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.DeleteErrorState)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.DeleteErrorState)) continue } // 5th Step // Move the chunks before performing any Delete // If you are in this block then it means that shard is ONline and can be deleted - err = shardingv1.MoveChunks(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) - if err != nil { - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.ChunkMoveError)) - title = "Chunk Movement Failure" - message = "Error occurred during 
chunk movement in shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " deletion." - r.sendMessage(instance, title, message) - continue - } - // 6th Step - // Check if Chunks has moved before performing actual delete - // This is a loop and will check unless there is a error or chunks has moved - // Validate if the chunks has moved before performing shard deletion - for { - err = shardingv1.VerifyChunks(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) - if err == nil { - break - } else { - msg = "Sleeping for 120 seconds and will check status again of chunks movement in gsm for shard: " + shardingv1.GetFmtStr(OraShardSpex.Name) - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - time.Sleep(120 * time.Second) + if strings.ToUpper(instance.Spec.ReplicationType) != "NATIVE" { + if len(instance.Spec.ReplicationType) == 0 { + err = shardingv1.MoveChunks(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) + if err != nil { + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.ChunkMoveError)) + title = "Chunk Movement Failure" + message = "Error occurred during chunk movement in shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " deletion." 
+ r.sendMessage(instance, title, message) + instance.Spec.Shard[i].IsDelete = "failed" + err = shardingv1.InstanceShardPatch(instance, instance, r.Client, i, "isDelete", "failed") + if err != nil { + msg = "Error occurred while changing the isDelete value to failed in Spec struct" + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + return err + } + continue + } + // 6th Step + // Check if Chunks has moved before performing actual delete + // This is a loop and will check unless there is a error or chunks has moved + // Validate if the chunks has moved before performing shard deletion + for { + msg = "Sleeping for 120 seconds and will check status again of chunks movement in gsm for shard: " + shardingv1.GetFmtStr(OraShardSpex.Name) + "ShardType=" + strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) + shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + time.Sleep(120 * time.Second) + err = shardingv1.VerifyChunks(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) + if err == nil { + break + } else { + if strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) != "USER" { + // If ShardingType is not "USER", do not perform the patching.. 
continue + continue + } + instance.Spec.Shard[i].IsDelete = "failed" + err = shardingv1.InstanceShardPatch(instance, instance, r.Client, i, "isDelete", "failed") + if err != nil { + // r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.ChunkMoveError)) + msg = "Error occurred while changing the isDelete value to failed in Spec struct" + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + // return err + } + return err + } + } } } + // 7th Step remove the shards from the GSM // This steps will delete the shard entry from the GSM // It will delete CDB from catalog @@ -1570,14 +1766,16 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar if err != nil { msg = "Error occurred during shard" + shardingv1.GetFmtStr(OraShardSpex.Name) + "removal from Gsm" shardingv1.LogMessages("Error", msg, nil, instance, r.Log) - r.updateShardStatus(instance, int(i), string(databasev1alpha1.ShardRemoveError)) + r.updateShardStatus(instance, int(i), string(databasev4.ShardRemoveError)) + instance.Spec.Shard[i].IsDelete = "failed" continue } + // 8th Step // Delete the Statefulset as all the chunks has moved and Shard can be phyiscally deleted r.delShard(instance, shardSfSet.Name, shardSfSet, shardPod, int(i)) - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.Terminated)) - r.updateShardStatus(instance, int(i), string(databasev1alpha1.Terminated)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.Terminated)) + r.updateShardStatus(instance, int(i), string(databasev4.Terminated)) title = "Shard Deletion Completed" message = "Shard deletion completed for shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " in GSM." 
r.sendMessage(instance, title, message) @@ -1589,7 +1787,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar } // This function delete the physical shard -func (r *ShardingDatabaseReconciler) delShard(instance *databasev1alpha1.ShardingDatabase, sfSetName string, sfSetFound *appsv1.StatefulSet, sfsetPod *corev1.Pod, specIdx int) { +func (r *ShardingDatabaseReconciler) delShard(instance *databasev4.ShardingDatabase, sfSetName string, sfSetFound *appsv1.StatefulSet, sfsetPod *corev1.Pod, specIdx int) { //var status bool var err error @@ -1600,7 +1798,7 @@ func (r *ShardingDatabaseReconciler) delShard(instance *databasev1alpha1.Shardin if err != nil { msg := "Failed to patch the Shard StatefulSet: " + sfSetFound.Name shardingv1.LogMessages("DEBUG", msg, err, instance, r.Log) - r.updateShardStatus(instance, specIdx, string(databasev1alpha1.LabelPatchingError)) + r.updateShardStatus(instance, specIdx, string(databasev4.LabelPatchingError)) return } @@ -1608,7 +1806,7 @@ func (r *ShardingDatabaseReconciler) delShard(instance *databasev1alpha1.Shardin if err != nil { msg = "Failed to delete Shard StatefulSet: " + shardingv1.GetFmtStr(sfSetFound.Name) shardingv1.LogMessages("DEBUG", msg, err, instance, r.Log) - r.updateShardStatus(instance, specIdx, string(databasev1alpha1.DeleteErrorState)) + r.updateShardStatus(instance, specIdx, string(databasev4.DeleteErrorState)) return } /// Delete External Service @@ -1639,14 +1837,14 @@ func (r *ShardingDatabaseReconciler) delShard(instance *databasev1alpha1.Shardin if err != nil { msg = "Failed to delete Shard pvc claim " + shardingv1.GetFmtStr(pvcName) shardingv1.LogMessages("DEBUG", msg, err, instance, r.Log) - r.updateShardStatus(instance, specIdx, string(databasev1alpha1.DeletePVCError)) + r.updateShardStatus(instance, specIdx, string(databasev4.DeletePVCError)) } } } -//======== GSM Invited Node ========== +// ======== GSM Invited Node ========== // Remove and add GSM invited node -func (r 
*ShardingDatabaseReconciler) gsmInvitedNodeOp(instance *databasev1alpha1.ShardingDatabase, objName string, +func (r *ShardingDatabaseReconciler) gsmInvitedNodeOp(instance *databasev4.ShardingDatabase, objName string, ) { var msg string @@ -1672,7 +1870,7 @@ func (r *ShardingDatabaseReconciler) gsmInvitedNodeOp(instance *databasev1alpha1 count = count + 1 continue } - err, _, _ = shardingv1.ExecCommand(gsmPodName.Name, shardingv1.GetShardInviteNodeCmd(objName), r.kubeClient, r.kubeConfig, instance, r.Log) + _, _, err = shardingv1.ExecCommand(gsmPodName.Name, shardingv1.GetShardInviteNodeCmd(objName), r.kubeClient, r.kubeConfig, instance, r.Log) if err != nil { msg = "Invite delete and add node failed " + shardingv1.GetFmtStr(objName) + " details in GSM." shardingv1.LogMessages("DEBUG", msg, err, instance, r.Log) @@ -1689,10 +1887,10 @@ func (r *ShardingDatabaseReconciler) gsmInvitedNodeOp(instance *databasev1alpha1 // ================================== CREATE FUNCTIONS ============================= // This function create a service based isExtern parameter set in the yaml file -func (r *ShardingDatabaseReconciler) createService(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) createService(instance *databasev4.ShardingDatabase, dep *corev1.Service, ) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Instance.Namespace", instance.Spec.Namespace, "Instance.Name", instance.Name) + reqLogger := r.Log.WithValues("Instance.Namespace", instance.Namespace, "Instance.Name", instance.Name) // See if Service already exists and create if it doesn't // We are getting error on nil pointer segment when r.scheme is null // Error : invalid memory address or nil pointer dereference" (runtime error: invalid memory address or nil pointer dereference) @@ -1709,7 +1907,7 @@ func (r *ShardingDatabaseReconciler) createService(instance *databasev1alpha1.Sh err := r.Client.Get(context.TODO(), types.NamespacedName{ Name: dep.Name, - Namespace: 
instance.Spec.Namespace, + Namespace: instance.Namespace, }, found) jsn, _ := json.Marshal(dep) @@ -1736,20 +1934,29 @@ func (r *ShardingDatabaseReconciler) createService(instance *databasev1alpha1.Sh } // This function deploy the statefulset -func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev4.ShardingDatabase, dep *appsv1.StatefulSet, resType string, ) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Instance.Namespace", instance.Spec.Namespace, "Instance.Name", instance.Name) + reqLogger := r.Log.WithValues("Instance.Namespace", instance.Namespace, "Instance.Name", instance.Name) message := "Inside the deployStatefulSet function" shardingv1.LogMessages("DEBUG", message, nil, instance, r.Log) // See if StatefulSets already exists and create if it doesn't + // Error : invalid memory address or nil pointer dereference" (runtime error: invalid memory address or nil pointer dereference) + // This happens during unit test cases + for i := 0; i < 5; i++ { + if r.Scheme == nil { + time.Sleep(time.Second * 40) + } else { + break + } + } controllerutil.SetControllerReference(instance, dep, r.Scheme) found := &appsv1.StatefulSet{} err := r.Client.Get(context.TODO(), types.NamespacedName{ Name: dep.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, found) jsn, _ := json.Marshal(dep) shardingv1.LogMessages("DEBUG", string(jsn), nil, instance, r.Log) @@ -1780,3 +1987,53 @@ func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev1alpha return ctrl.Result{}, nil } + +func (r *ShardingDatabaseReconciler) checkShardState(instance *databasev4.ShardingDatabase) error { + + var i int32 + var err error = nil + var OraShardSpex databasev4.ShardSpec + var currState string + var eventMsg string + var msg string + + currState = "" + eventMsg = "" + + msg = "checkShardState():ShardType=" + 
strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) + shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + if strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) != "USER" { + // ShardingType is not "USER", so return + return err + } + + if len(instance.Status.Gsm.Shards) > 0 { + for i = 0; i < int32(len(instance.Spec.Shard)); i++ { + OraShardSpex = instance.Spec.Shard[i] + currState = shardingv1.GetGsmShardStatus(instance, OraShardSpex.Name) + if OraShardSpex.IsDelete == "failed" { + eventMsg = "Shard Deletion failed for [" + OraShardSpex.Name + "]. Retry shard deletion after manually moving the chunks. Requeuing" + err = fmt.Errorf(eventMsg) + } else if currState == string(databasev4.AddingShardState) { + eventMsg = "Shard Addition in progress for [" + OraShardSpex.Name + "]. Requeuing" + err = fmt.Errorf(eventMsg) + } else if currState == string(databasev4.DeletingState) { + eventMsg = "Shard Deletion in progress for [" + OraShardSpex.Name + "]. Requeuing" + err = fmt.Errorf(eventMsg) + err = nil + } else if currState == string(databasev4.DeleteErrorState) { + eventMsg = "Shard Deletion Error for [" + OraShardSpex.Name + "]. Manual intervention required. Requeuing" + err = fmt.Errorf(eventMsg) + } else if currState == string(databasev4.ShardRemoveError) { + eventMsg = "Shard Deletion Error for [" + OraShardSpex.Name + "]. Manual intervention required. 
Requeuing" + err = fmt.Errorf(eventMsg) + } else { + eventMsg = "checkShardState() : Shard State[" + OraShardSpex.Name + "]=[" + currState + "]" + shardingv1.LogMessages("INFO", eventMsg, nil, instance, r.Log) + err = nil + } + r.publishEvents(instance, eventMsg, currState) + } + } + return err +} diff --git a/controllers/database/singleinstancedatabase_controller.go b/controllers/database/singleinstancedatabase_controller.go index c69ba5b3..13f2ec6f 100644 --- a/controllers/database/singleinstancedatabase_controller.go +++ b/controllers/database/singleinstancedatabase_controller.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2023 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -46,17 +46,21 @@ import ( "strings" "time" - dbapi "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" dbcommons "github.com/oracle/oracle-database-operator/commons/database" + "golang.org/x/text/cases" + "golang.org/x/text/language" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -78,12 +82,26 @@ type SingleInstanceDatabaseReconciler struct { var requeueY ctrl.Result = ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second} var requeueN ctrl.Result = ctrl.Result{} +// For scheduling reconcile to renew certs if TCPS is enabled +// Default value is requeueN (No reconcile) +var futureRequeue ctrl.Result = requeueN + const singleInstanceDatabaseFinalizer = "database.oracle.com/singleinstancedatabasefinalizer" 
+var oemExpressUrl string + +var ErrNotPhysicalStandby error = errors.New("database not in PHYSICAL_STANDBY role") +var ErrDBNotConfiguredWithDG error = errors.New("database is not configured with a dataguard configuration") +var ErrFSFOEnabledForDGConfig error = errors.New("database is configured with dataguard and FSFO enabled") +var ErrAdminPasswordSecretNotFound error = errors.New("Admin password secret for the database not found") + //+kubebuilder:rbac:groups=database.oracle.com,resources=singleinstancedatabases,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=database.oracle.com,resources=singleinstancedatabases/status,verbs=get;update;patch //+kubebuilder:rbac:groups=database.oracle.com,resources=singleinstancedatabases/finalizers,verbs=update -//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;persistentvolumeclaims;services;nodes;events,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;persistentvolumeclaims;services,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups="",resources=persistentvolumes,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch +//+kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -104,6 +122,7 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct singleInstanceDatabase := &dbapi.SingleInstanceDatabase{} cloneFromDatabase := &dbapi.SingleInstanceDatabase{} + referredPrimaryDatabase := &dbapi.SingleInstanceDatabase{} // Execute for every reconcile defer r.updateReconcileStatus(singleInstanceDatabase, ctx, &result, &err, &blocked, &completed) @@ -114,18 +133,40 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct r.Log.Info("Resource not found") return requeueN, nil } + r.Log.Error(err, err.Error()) return requeueY, err } + /* Initialize Status */ + if singleInstanceDatabase.Status.Status == "" { + singleInstanceDatabase.Status.Status = dbcommons.StatusPending + if singleInstanceDatabase.Spec.Edition != "" { + singleInstanceDatabase.Status.Edition = cases.Title(language.English).String(singleInstanceDatabase.Spec.Edition) + } else { + singleInstanceDatabase.Status.Edition = dbcommons.ValueUnavailable + } + singleInstanceDatabase.Status.Role = dbcommons.ValueUnavailable + singleInstanceDatabase.Status.ConnectString = dbcommons.ValueUnavailable + singleInstanceDatabase.Status.PdbConnectString = dbcommons.ValueUnavailable + singleInstanceDatabase.Status.TcpsConnectString = dbcommons.ValueUnavailable + singleInstanceDatabase.Status.OemExpressUrl = dbcommons.ValueUnavailable + singleInstanceDatabase.Status.ReleaseUpdate = dbcommons.ValueUnavailable + r.Status().Update(ctx, singleInstanceDatabase) + } + // Manage SingleInstanceDatabase Deletion result, err = r.manageSingleInstanceDatabaseDeletion(req, ctx, singleInstanceDatabase) if result.Requeue { r.Log.Info("Reconcile queued") return result, nil } + if err != nil { + r.Log.Error(err, err.Error()) + return result, err + } // First validate - result, err = r.validate(singleInstanceDatabase, cloneFromDatabase, ctx, req) + result, err = r.validate(singleInstanceDatabase, cloneFromDatabase, referredPrimaryDatabase, ctx, req) if 
result.Requeue { r.Log.Info("Spec validation failed, Reconcile queued") return result, nil @@ -135,43 +176,36 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct return result, nil } - // Service creation - result, err = r.createOrReplaceSVC(ctx, req, singleInstanceDatabase) + // PVC Creation for Datafiles Volume + result, err = r.createOrReplacePVCforDatafilesVol(ctx, req, singleInstanceDatabase) if result.Requeue { r.Log.Info("Reconcile queued") return result, nil } - // PVC Creation - result, err = r.createOrReplacePVC(ctx, req, singleInstanceDatabase) + // PVC Creation for customScripts Volume + result, err = r.createOrReplacePVCforCustomScriptsVol(ctx, req, singleInstanceDatabase) if result.Requeue { r.Log.Info("Reconcile queued") return result, nil } // POD creation - result, err = r.createOrReplacePods(singleInstanceDatabase, cloneFromDatabase, ctx, req) + result, err = r.createOrReplacePods(singleInstanceDatabase, cloneFromDatabase, referredPrimaryDatabase, ctx, req) if result.Requeue { r.Log.Info("Reconcile queued") return result, nil } - if singleInstanceDatabase.Status.DatafilesCreated != "true" { - // Creation of Oracle Wallet for Single Instance Database credentials - result, err = r.createWallet(singleInstanceDatabase, ctx, req) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil - } - if err != nil { - r.Log.Info("Spec validation failed") - return result, nil - } + // Service creation + result, err = r.createOrReplaceSVC(ctx, req, singleInstanceDatabase) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil } // Validate readiness - var readyPod corev1.Pod - result, readyPod, err = r.validateDBReadiness(singleInstanceDatabase, ctx, req) + result, readyPod, err := r.validateDBReadiness(singleInstanceDatabase, ctx, req) if result.Requeue { r.Log.Info("Reconcile queued") return result, nil @@ -188,22 +222,80 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx 
context.Context, req ct } } - // Update DB config - result, err = r.updateDBConfig(singleInstanceDatabase, readyPod, ctx, req) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil + sidbRole, err := dbcommons.GetDatabaseRole(readyPod, r, r.Config, ctx, req) + + if sidbRole == "PRIMARY" { + // Update DB config + result, err = r.updateDBConfig(singleInstanceDatabase, readyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // Update Init Parameters + result, err = r.updateInitParameters(singleInstanceDatabase, readyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + // Configure TCPS + result, err = r.configTcps(singleInstanceDatabase, readyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + + } else { + if singleInstanceDatabase.Status.DgBroker == nil { + err = SetupStandbyDatabase(r, singleInstanceDatabase, referredPrimaryDatabase, ctx, req) + if err != nil { + return requeueY, err + } + } + + databaseOpenMode, err := dbcommons.GetDatabaseOpenMode(readyPod, r, r.Config, ctx, req, singleInstanceDatabase.Spec.Edition) + + if err != nil { + r.Log.Error(err, err.Error()) + return requeueY, err + } + r.Log.Info("DB openMode Output") + r.Log.Info(databaseOpenMode) + if databaseOpenMode == "READ_ONLY" || databaseOpenMode == "MOUNTED" { + // Changing the open mode for sidb to "READ ONLY WITH APPLY" + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.ModifyStdbyDBOpenMode, dbcommons.SQLPlusCLI)) + if err != nil { + r.Log.Error(err, err.Error()) + return requeueY, err + } + r.Log.Info("Standby DB open mode modified") + r.Log.Info(out) + } + + singleInstanceDatabase.Status.PrimaryDatabase = referredPrimaryDatabase.Name + // Store all standbyDatabase sid:name in a map to use it during manual switchover. 
+ if len(referredPrimaryDatabase.Status.StandbyDatabases) == 0 { + referredPrimaryDatabase.Status.StandbyDatabases = make(map[string]string) + } + referredPrimaryDatabase.Status.StandbyDatabases[strings.ToUpper(singleInstanceDatabase.Spec.Sid)] = singleInstanceDatabase.Name + r.Status().Update(ctx, referredPrimaryDatabase) + } - // Update Init Parameters - result, err = r.updateInitParameters(singleInstanceDatabase, readyPod, ctx, req) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil + // manage snapshot database creation + if singleInstanceDatabase.Spec.ConvertToSnapshotStandby != singleInstanceDatabase.Status.ConvertToSnapshotStandby { + result, err := r.manageConvPhysicalToSnapshot(ctx, req) + if err != nil { + return requeueN, err + } + if result.Requeue { + return requeueY, nil + } } // Run Datapatch - if singleInstanceDatabase.Status.DatafilesPatched != "true" { + if strings.ToUpper(singleInstanceDatabase.Status.Role) == "PRIMARY" && singleInstanceDatabase.Status.DatafilesPatched != "true" { // add a blocking reconcile condition err = errors.New("processing datapatch execution") blocked = true @@ -215,25 +307,44 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct } } - // If LoadBalancer = true , ensure Connect String is updated + // This is to ensure that in case of LoadBalancer services the, the Load Balancer is ready to serve the requests if singleInstanceDatabase.Status.ConnectString == dbcommons.ValueUnavailable { + r.Log.Info("Connect string not available for the database " + singleInstanceDatabase.Name) return requeueY, nil } - // update status to Ready after all operations succeed - singleInstanceDatabase.Status.Status = dbcommons.StatusReady + // updating singleinstancedatabase Status + err = r.updateSidbStatus(singleInstanceDatabase, readyPod, ctx, req) + if err != nil { + return requeueY, err + } + r.updateORDSStatus(singleInstanceDatabase, ctx, req) completed = true r.Log.Info("Reconcile 
completed") + + // Scheduling a reconcile for certificate renewal, if TCPS is enabled + if futureRequeue != requeueN { + r.Log.Info("Scheduling Reconcile for cert renewal", "Duration(Hours)", futureRequeue.RequeueAfter.Hours()) + copyFutureRequeue := futureRequeue + futureRequeue = requeueN + return copyFutureRequeue, nil + } + return requeueN, nil } -//############################################################################# -// Update each reconcile condtion/status -//############################################################################# +// ############################################################################# +// +// Update each reconcile condtion/status +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) updateReconcileStatus(m *dbapi.SingleInstanceDatabase, ctx context.Context, result *ctrl.Result, err *error, blocked *bool, completed *bool) { + // Always refresh status before a reconcile + defer r.Status().Update(ctx, m) + errMsg := func() string { if *err != nil { return (*err).Error() @@ -284,54 +395,58 @@ func (r *SingleInstanceDatabaseReconciler) updateReconcileStatus(m *dbapi.Single meta.RemoveStatusCondition(&m.Status.Conditions, condition.Type) } meta.SetStatusCondition(&m.Status.Conditions, condition) - // Always refresh status before a reconcile - r.Status().Update(ctx, m) } -//############################################################################# -// Validate the CRD specs -// m = SingleInstanceDatabase -// n = CloneFromDatabase -//############################################################################# +// ############################################################################# +// +// Validate the CRD specs +// m = SingleInstanceDatabase +// n = CloneFromDatabase +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) validate(m *dbapi.SingleInstanceDatabase, - n 
*dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + n *dbapi.SingleInstanceDatabase, rp *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { var err error eventReason := "Spec Error" var eventMsgs []string - // If Express Edition , Ensure Replicas=1 - if m.Spec.Edition == "express" && m.Spec.Replicas != 1 { - eventMsgs = append(eventMsgs, "XE supports only one replica") + r.Log.Info("Entering reconcile validation") + + //First check image pull secrets + if m.Spec.Image.PullSecrets != "" { + secret := &corev1.Secret{} + err = r.Get(ctx, types.NamespacedName{Name: m.Spec.Image.PullSecrets, Namespace: m.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + // Secret not found + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, err.Error()) + r.Log.Info(err.Error()) + m.Status.Status = dbcommons.StatusError + return requeueY, err + } + r.Log.Error(err, err.Error()) + return requeueY, err + } + } + + // If Express/Free Edition, ensure Replicas=1 + if (m.Spec.Edition == "express" || m.Spec.Edition == "free") && m.Spec.Replicas > 1 { + eventMsgs = append(eventMsgs, m.Spec.Edition+" edition supports only one replica") } - // If Block Volume , Ensure Replicas=1 - if m.Spec.Persistence.AccessMode == "ReadWriteOnce" && m.Spec.Replicas != 1 { - eventMsgs = append(eventMsgs, "accessMode ReadWriteOnce supports only one replica") + // If no persistence, ensure Replicas=1 + if m.Spec.Persistence.Size == "" && m.Spec.Replicas > 1 { + eventMsgs = append(eventMsgs, "replicas should be 1 if no persistence is specified") } if m.Status.Sid != "" && !strings.EqualFold(m.Spec.Sid, m.Status.Sid) { eventMsgs = append(eventMsgs, "sid cannot be updated") } - edition := m.Spec.Edition - if m.Spec.Edition == "" { - edition = "Enterprise" - } - if m.Spec.CloneFrom == "" && m.Status.Edition != "" && !strings.EqualFold(m.Status.Edition, edition) { - eventMsgs = append(eventMsgs, "edition 
cannot be updated") - } if m.Status.Charset != "" && !strings.EqualFold(m.Status.Charset, m.Spec.Charset) { eventMsgs = append(eventMsgs, "charset cannot be updated") } if m.Status.Pdbname != "" && !strings.EqualFold(m.Status.Pdbname, m.Spec.Pdbname) { eventMsgs = append(eventMsgs, "pdbName cannot be updated") } - if m.Status.CloneFrom != "" && - (m.Status.CloneFrom == dbcommons.NoCloneRef && m.Spec.CloneFrom != "" || - m.Status.CloneFrom != dbcommons.NoCloneRef && m.Status.CloneFrom != m.Spec.CloneFrom) { - eventMsgs = append(eventMsgs, "cloneFrom cannot be updated") - } - if m.Spec.Edition == "express" && m.Spec.CloneFrom != "" { - eventMsgs = append(eventMsgs, "cloning not supported for express edition") - } - if m.Status.OrdsReference != "" && m.Status.Persistence.AccessMode != "" && m.Status.Persistence != m.Spec.Persistence { + if m.Status.OrdsReference != "" && m.Status.Persistence.Size != "" && m.Status.Persistence != m.Spec.Persistence { eventMsgs = append(eventMsgs, "uninstall ORDS to change Peristence") } if len(eventMsgs) > 0 { @@ -341,8 +456,8 @@ func (r *SingleInstanceDatabaseReconciler) validate(m *dbapi.SingleInstanceDatab return requeueN, err } - // Validating the secret - if m.Status.DatafilesCreated != "true" { + // Validating the secret. 
Pre-built db doesnt need secret + if !m.Spec.Image.PrebuiltDB && m.Status.DatafilesCreated != "true" { secret := &corev1.Secret{} err = r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, secret) if err != nil { @@ -361,22 +476,32 @@ func (r *SingleInstanceDatabaseReconciler) validate(m *dbapi.SingleInstanceDatab // update status fields m.Status.Sid = m.Spec.Sid - m.Status.Edition = strings.Title(edition) m.Status.Charset = m.Spec.Charset m.Status.Pdbname = m.Spec.Pdbname m.Status.Persistence = m.Spec.Persistence - if m.Spec.CloneFrom == "" { - m.Status.CloneFrom = dbcommons.NoCloneRef - } else { - m.Status.CloneFrom = m.Spec.CloneFrom + m.Status.PrebuiltDB = m.Spec.Image.PrebuiltDB + if m.Spec.CreateAs == "truecache" { + // Fetch the Primary database reference, required for all iterations + err = r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.PrimaryDatabaseRef}, rp) + if err != nil { + if apierrors.IsNotFound(err) { + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, err.Error()) + r.Log.Info(err.Error()) + return requeueN, err + } + return requeueY, err + } } - if m.Spec.CloneFrom != "" { + if m.Spec.CreateAs == "clone" { + // Once a clone database has created , it has no link with its reference - if m.Status.DatafilesCreated == "true" { + if m.Status.DatafilesCreated == "true" || + !dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { return requeueN, nil } + // Fetch the Clone database reference - err = r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.CloneFrom}, n) + err = r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.PrimaryDatabaseRef}, n) if err != nil { if apierrors.IsNotFound(err) { r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, err.Error()) @@ -389,16 +514,16 @@ func (r *SingleInstanceDatabaseReconciler) validate(m *dbapi.SingleInstanceDatab if n.Status.Status != dbcommons.StatusReady { m.Status.Status = 
dbcommons.StatusPending eventReason := "Source Database Pending" - eventMsg := "waiting for source database " + m.Spec.CloneFrom + " to be Ready" + eventMsg := "status of database " + n.Name + " is not ready, retrying..." r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) err = errors.New(eventMsg) return requeueY, err } - if !n.Spec.ArchiveLog { + if !*n.Spec.ArchiveLog { m.Status.Status = dbcommons.StatusPending - eventReason := "Source Database Pending" - eventMsg := "waiting for ArchiveLog to turn ON " + n.Name + eventReason := "Source Database Check" + eventMsg := "enable ArchiveLog for database " + n.Name r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) r.Log.Info(eventMsg) err = errors.New(eventMsg) @@ -406,16 +531,67 @@ func (r *SingleInstanceDatabaseReconciler) validate(m *dbapi.SingleInstanceDatab } m.Status.Edition = n.Status.Edition + m.Status.PrimaryDatabase = n.Name + } + + if m.Spec.CreateAs == "standby" && m.Status.Role != "PRIMARY" { + + // Fetch the Primary database reference, required for all iterations + err = r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.PrimaryDatabaseRef}, rp) + if err != nil { + if apierrors.IsNotFound(err) { + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, err.Error()) + r.Log.Info(err.Error()) + return requeueN, err + } + return requeueY, err + } + + if m.Spec.Sid == rp.Spec.Sid { + r.Log.Info("Standby database SID can not be same as the Primary database SID") + r.Recorder.Eventf(m, corev1.EventTypeWarning, "Spec Error", "Standby and Primary database SID can not be same") + m.Status.Status = dbcommons.StatusError + return requeueY, err + } + + if rp.Status.IsTcpsEnabled { + r.Recorder.Eventf(m, corev1.EventTypeWarning, "Cannot Create", "Standby for TCPS enabled Primary Database is not supported ") + m.Status.Status = dbcommons.StatusError + return requeueY, nil + } + + if m.Status.DatafilesCreated == "true" || + 
!dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { + return requeueN, nil + } + m.Status.Edition = rp.Status.Edition + + err = ValidatePrimaryDatabaseForStandbyCreation(r, m, rp, ctx, req) + if err != nil { + return requeueY, err + } + + r.Log.Info("Setting up Primary Database for standby creation...") + err = SetupPrimaryDatabase(r, m, rp, ctx, req) + if err != nil { + return requeueY, err + } } + r.Log.Info("Completed reconcile validation") + return requeueN, nil } -//############################################################################# -// Instantiate POD spec from SingleInstanceDatabase spec -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleInstanceDatabase, n *dbapi.SingleInstanceDatabase) *corev1.Pod { +// ############################################################################# +// +// Instantiate POD spec from SingleInstanceDatabase spec +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleInstanceDatabase, n *dbapi.SingleInstanceDatabase, rp *dbapi.SingleInstanceDatabase, + requiredAffinity bool) *corev1.Pod { + // POD spec pod := &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -429,30 +605,168 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns }, }, Spec: corev1.PodSpec{ + Affinity: func() *corev1.Affinity { + if m.Spec.Persistence.AccessMode == "ReadWriteOnce" { + if requiredAffinity { + return &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{m.Name}, + }}, + }, + TopologyKey: "kubernetes.io/hostname", + }}, + }, + } + } else { 
+ return &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{m.Name}, + }}, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }}, + }, + } + } + } + // For ReadWriteMany Access, spread out the PODs + return &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{m.Name}, + }}, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }}, + }, + } + }(), Volumes: []corev1.Volume{{ - Name: "datamount", + Name: "datafiles-vol", + VolumeSource: func() corev1.VolumeSource { + if m.Spec.Persistence.Size == "" { + return corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}} + } + /* Persistence is specified */ + return corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: m.Name, + ReadOnly: false, + }, + } + }(), + }, { + Name: "oracle-pwd-vol", VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: m.Name, - ReadOnly: false, + Secret: &corev1.SecretVolumeSource{ + SecretName: m.Spec.AdminPassword.SecretName, + Optional: func() *bool { i := (m.Spec.Edition != "express" && m.Spec.Edition != "free"); return &i }(), + Items: []corev1.KeyToPath{{ + Key: m.Spec.AdminPassword.SecretKey, + Path: "oracle_pwd", + }}, }, }, + }, { + Name: "tls-secret-vol", + VolumeSource: func() corev1.VolumeSource { + if m.Spec.TcpsTlsSecret == "" { + return 
corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}} + } + /* tls-secret is specified */ + return corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: m.Spec.TcpsTlsSecret, + Optional: func() *bool { i := true; return &i }(), + Items: []corev1.KeyToPath{ + { + Key: "tls.crt", // Mount the certificate + Path: "cert.crt", // Mount path inside the container + }, + { + Key: "tls.key", // Mount the private key + Path: "client.key", // Mount path inside the container + }, + }, + }, + } + }(), + }, { + Name: "custom-scripts-vol", + VolumeSource: func() corev1.VolumeSource { + if m.Spec.Persistence.ScriptsVolumeName == "" || m.Spec.Persistence.ScriptsVolumeName == m.Spec.Persistence.DatafilesVolumeName { + return corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}} + } + /* Persistence.ScriptsVolumeName is specified */ + return corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: m.Name + "-" + m.Spec.Persistence.ScriptsVolumeName, + ReadOnly: false, + }, + } + }(), }}, InitContainers: func() []corev1.Container { - if m.Spec.Edition != "express" { - return []corev1.Container{{ + initContainers := []corev1.Container{} + if m.Spec.Persistence.Size != "" && m.Spec.Persistence.SetWritePermissions != nil && *m.Spec.Persistence.SetWritePermissions { + initContainers = append(initContainers, corev1.Container{ Name: "init-permissions", Image: m.Spec.Image.PullFrom, - Command: []string{"/bin/sh", "-c", fmt.Sprintf("chown %d:%d /opt/oracle/oradata", int(dbcommons.ORACLE_UID), int(dbcommons.ORACLE_GUID))}, + Command: []string{"/bin/sh", "-c", fmt.Sprintf("chown %d:%d /opt/oracle/oradata || true", int(dbcommons.ORACLE_UID), int(dbcommons.ORACLE_GUID))}, SecurityContext: &corev1.SecurityContext{ // User ID 0 means, root user RunAsUser: func() *int64 { i := int64(0); return &i }(), }, VolumeMounts: []corev1.VolumeMount{{ MountPath: "/opt/oracle/oradata", - Name: "datamount", + Name: "datafiles-vol", + 
}}, + }) + } + if m.Spec.Image.PrebuiltDB { + initContainers = append(initContainers, corev1.Container{ + Name: "init-prebuiltdb", + Image: m.Spec.Image.PullFrom, + Command: []string{"/bin/sh", "-c", dbcommons.InitPrebuiltDbCMD}, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: func() *int64 { i := int64(dbcommons.ORACLE_UID); return &i }(), + RunAsGroup: func() *int64 { i := int64(dbcommons.ORACLE_GUID); return &i }(), + }, + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mnt/oradata", + Name: "datafiles-vol", }}, - }, { + Env: []corev1.EnvVar{ + { + Name: "ORACLE_SID", + Value: strings.ToUpper(m.Spec.Sid), + }, + }, + }) + } + /* Wallet only for edition barring express and free editions, non-prebuiltDB */ + if (m.Spec.Edition != "express" && m.Spec.Edition != "free") && !m.Spec.Image.PrebuiltDB { + initContainers = append(initContainers, corev1.Container{ Name: "init-wallet", Image: m.Spec.Image.PullFrom, Env: []corev1.EnvVar{ @@ -466,21 +780,25 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns }, { Name: "WALLET_DIR", - Value: "/opt/oracle/oradata/dbconfig/$(ORACLE_SID)/.wallet", + Value: "/opt/oracle/oradata/dbconfig/${ORACLE_SID}/.wallet", }, }, Command: []string{"/bin/sh"}, Args: func() []string { edition := "" - if m.Spec.CloneFrom == "" { + if m.Spec.CreateAs != "clone" { edition = m.Spec.Edition if m.Spec.Edition == "" { edition = "enterprise" } } else { - edition = n.Spec.Edition - if n.Spec.Edition == "" { - edition = "enterprise" + if !dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { + edition = m.Spec.Edition + } else { + edition = n.Spec.Edition + if n.Spec.Edition == "" { + edition = "enterprise" + } } } return []string{"-c", fmt.Sprintf(dbcommons.InitWalletCMD, edition)} @@ -491,59 +809,128 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns }, VolumeMounts: []corev1.VolumeMount{{ MountPath: "/opt/oracle/oradata", - Name: "datamount", + Name: 
"datafiles-vol", }}, - }} + }) } - return []corev1.Container{{ - Name: "init-permissions", - Image: m.Spec.Image.PullFrom, - Command: []string{"/bin/sh", "-c", fmt.Sprintf("chown %d:%d /opt/oracle/oradata", int(dbcommons.ORACLE_UID), int(dbcommons.ORACLE_GUID))}, - SecurityContext: &corev1.SecurityContext{ - // User ID 0 means, root user - RunAsUser: func() *int64 { i := int64(0); return &i }(), - }, - VolumeMounts: []corev1.VolumeMount{{ - MountPath: "/opt/oracle/oradata", - Name: "datamount", - }}, - }} + return initContainers }(), Containers: []corev1.Container{{ Name: m.Name, Image: m.Spec.Image.PullFrom, - Lifecycle: &corev1.Lifecycle{ - PreStop: &corev1.Handler{ - Exec: &corev1.ExecAction{ - Command: []string{"/bin/sh", "-c", "/bin/echo -en 'shutdown abort;\n' | env ORACLE_SID=${ORACLE_SID^^} sqlplus -S / as sysdba"}, - }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + // Allow priority elevation for DB processes + Add: []corev1.Capability{"SYS_NICE"}, }, }, - ImagePullPolicy: corev1.PullAlways, - Ports: []corev1.ContainerPort{{ContainerPort: 1521}, {ContainerPort: 5500}}, - - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ Exec: &corev1.ExecAction{ - Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, + Command: func() []string { + // For patching use cases shutdown immediate is needed especially for standby databases + shutdown_mode := "immediate" + if m.Spec.Edition == "express" || m.Spec.Edition == "free" { + // express/free do not support patching + // To terminate any zombie instances left over due to forced termination + shutdown_mode = "abort" + } + return []string{"/bin/sh", "-c", "/bin/echo -en 'shutdown " + shutdown_mode + ";\n' | env ORACLE_SID=${ORACLE_SID^^} sqlplus -S / as sysdba"} + }(), }, }, - InitialDelaySeconds: 
20, - TimeoutSeconds: 20, - PeriodSeconds: func() int32 { - if m.Spec.ReadinessCheckPeriod > 0 { - return int32(m.Spec.ReadinessCheckPeriod) - } - return 30 - }(), }, - - VolumeMounts: []corev1.VolumeMount{{ - MountPath: "/opt/oracle/oradata", - Name: "datamount", - }}, + Ports: []corev1.ContainerPort{{ContainerPort: dbcommons.CONTAINER_LISTENER_PORT}, {ContainerPort: 5500}}, + + ReadinessProbe: func() *corev1.Probe { + if m.Spec.CreateAs == "primary" { + return &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi "}, + }, + }, + InitialDelaySeconds: 20, + TimeoutSeconds: 20, + PeriodSeconds: func() int32 { + if m.Spec.ReadinessCheckPeriod > 0 { + return int32(m.Spec.ReadinessCheckPeriod) + } + return 60 + }(), + } + } else { + return &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "if [ -f $ORACLE_BASE/oradata/.$ORACLE_SID$CHECKPOINT_FILE_EXTN ]; then if [ -f $ORACLE_BASE/checkDBLockStatus.sh ]; then $ORACLE_BASE/checkDBLockStatus.sh ; else $ORACLE_BASE/checkDBStatus.sh; fi else true; fi "}, + }, + }, + InitialDelaySeconds: 0, + TimeoutSeconds: 20, + PeriodSeconds: func() int32 { + if m.Spec.ReadinessCheckPeriod > 0 { + return int32(m.Spec.ReadinessCheckPeriod) + } + return 60 + }(), + } + } + }(), + VolumeMounts: func() []corev1.VolumeMount { + mounts := []corev1.VolumeMount{} + if m.Spec.Persistence.Size != "" { + mounts = append(mounts, corev1.VolumeMount{ + MountPath: "/opt/oracle/oradata", + Name: "datafiles-vol", + }) + } + if m.Spec.Edition == "express" || m.Spec.Edition == "free" || m.Spec.Image.PrebuiltDB { + // mounts pwd as secrets for express edition or prebuilt db + mounts = append(mounts, corev1.VolumeMount{ + MountPath: "/run/secrets/oracle_pwd", + ReadOnly: true, + Name: "oracle-pwd-vol", + 
SubPath: "oracle_pwd", + }) + } + if m.Spec.TcpsTlsSecret != "" { + mounts = append(mounts, corev1.VolumeMount{ + MountPath: dbcommons.TlsCertsLocation, + ReadOnly: true, + Name: "tls-secret-vol", + }) + } + if m.Spec.Persistence.ScriptsVolumeName != "" { + mounts = append(mounts, corev1.VolumeMount{ + MountPath: "/opt/oracle/scripts/startup/", + ReadOnly: true, + Name: func() string { + if m.Spec.Persistence.ScriptsVolumeName != m.Spec.Persistence.DatafilesVolumeName { + return "custom-scripts-vol" + } else { + return "datafiles-vol" + } + }(), + SubPath: "startup", + }) + mounts = append(mounts, corev1.VolumeMount{ + MountPath: "/opt/oracle/scripts/setup/", + ReadOnly: true, + Name: func() string { + if m.Spec.Persistence.ScriptsVolumeName != m.Spec.Persistence.DatafilesVolumeName { + return "custom-scripts-vol" + } else { + return "datafiles-vol" + } + }(), + SubPath: "setup", + }) + } + return mounts + }(), Env: func() []corev1.EnvVar { - if m.Spec.CloneFrom == "" { + if m.Spec.CreateAs == "truecache" { return []corev1.EnvVar{ { Name: "SVC_HOST", @@ -551,28 +938,7 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns }, { Name: "SVC_PORT", - Value: "1521", - }, - { - Name: "CREATE_PDB", - Value: func() string { - if m.Spec.Pdbname != "" { - return "true" - } - return "false" - }(), - }, - { - Name: "ORACLE_SID", - Value: strings.ToUpper(m.Spec.Sid), - }, - { - Name: "WALLET_DIR", - Value: "/opt/oracle/oradata/dbconfig/$(ORACLE_SID)/.wallet", - }, - { - Name: "ORACLE_PDB", - Value: m.Spec.Pdbname, + Value: strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)), }, { Name: "ORACLE_CHARACTERSET", @@ -583,75 +949,245 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns Value: m.Spec.Edition, }, { - Name: "INIT_SGA_SIZE", + Name: "TRUE_CACHE", + Value: "true", + }, + { + Name: "PRIMARY_DB_CONN_STR", Value: func() string { - if m.Spec.InitParams.SgaTarget > 0 && m.Spec.InitParams.PgaAggregateTarget > 0 { - 
return strconv.Itoa(m.Spec.InitParams.SgaTarget) + if dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { + return rp.Name + ":" + strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)) + "/" + rp.Spec.Sid } - return "" + return m.Spec.PrimaryDatabaseRef }(), }, { - Name: "INIT_PGA_SIZE", + Name: "PDB_TC_SVCS", Value: func() string { - if m.Spec.InitParams.SgaTarget > 0 && m.Spec.InitParams.PgaAggregateTarget > 0 { - return strconv.Itoa(m.Spec.InitParams.SgaTarget) - } - return "" + return strings.Join(m.Spec.TrueCacheServices, ";") }(), }, { - Name: "SKIP_DATAPATCH", - Value: "true", + Name: "ORACLE_HOSTNAME", + Value: m.Name, }, } } - return []corev1.EnvVar{ - { - Name: "SVC_HOST", - Value: m.Name, - }, - { - Name: "SVC_PORT", - Value: "1521", + // adding XE support, useful for dev/test/CI-CD + if m.Spec.Edition == "express" || m.Spec.Edition == "free" { + return []corev1.EnvVar{ + { + Name: "SVC_HOST", + Value: m.Name, + }, + { + Name: "SVC_PORT", + Value: strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)), + }, + { + Name: "ORACLE_CHARACTERSET", + Value: m.Spec.Charset, + }, + { + Name: "ORACLE_EDITION", + Value: m.Spec.Edition, + }, + } + } + if m.Spec.CreateAs == "clone" { + // Clone DB use-case + return []corev1.EnvVar{ + { + Name: "SVC_HOST", + Value: m.Name, + }, + { + Name: "SVC_PORT", + Value: strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)), + }, + { + Name: "ORACLE_SID", + Value: strings.ToUpper(m.Spec.Sid), + }, + { + Name: "WALLET_DIR", + Value: "/opt/oracle/oradata/dbconfig/${ORACLE_SID}/.wallet", + }, + { + Name: "PRIMARY_DB_CONN_STR", + Value: func() string { + if dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { + return n.Name + ":" + strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)) + "/" + n.Spec.Sid + } + return m.Spec.PrimaryDatabaseRef + }(), + }, + CreateOracleHostnameEnvVarObj(m, n), + { + Name: "CLONE_DB", + Value: "true", + }, + { + Name: "SKIP_DATAPATCH", + Value: "true", + }, + } + + } else if 
m.Spec.CreateAs == "standby" { + //Standby DB Usecase + return []corev1.EnvVar{ + { + Name: "SVC_HOST", + Value: m.Name, + }, + { + Name: "SVC_PORT", + Value: strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)), + }, + { + Name: "ORACLE_SID", + Value: strings.ToUpper(m.Spec.Sid), + }, + { + Name: "WALLET_DIR", + Value: "/opt/oracle/oradata/dbconfig/${ORACLE_SID}/.wallet", + }, + { + Name: "PRIMARY_DB_CONN_STR", + Value: func() string { + if dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { + return rp.Name + ":" + strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)) + "/" + rp.Spec.Sid + } + return m.Spec.PrimaryDatabaseRef + }(), + }, + { + Name: "PRIMARY_SID", + Value: strings.ToUpper(rp.Spec.Sid), + }, + { + Name: "PRIMARY_IP", + Value: rp.Name, + }, + { + Name: "CREATE_PDB", + Value: func() string { + if rp.Spec.Pdbname != "" { + return "true" + } + return "false" + }(), + }, + { + Name: "ORACLE_HOSTNAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + { + Name: "STANDBY_DB", + Value: "true", + }, + { + Name: "SKIP_DATAPATCH", + Value: "true", + }, + } + } + + return []corev1.EnvVar{ + { + Name: "SVC_HOST", + Value: m.Name, + }, + { + Name: "SVC_PORT", + Value: strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)), + }, + { + Name: "CREATE_PDB", + Value: func() string { + if m.Spec.Pdbname != "" { + return "true" + } + return "false" + }(), }, { Name: "ORACLE_SID", Value: strings.ToUpper(m.Spec.Sid), }, { - Name: "WALLET_DIR", - Value: "/opt/oracle/oradata/dbconfig/$(ORACLE_SID)/.wallet", + Name: "WALLET_DIR", + Value: func() string { + if m.Spec.Image.PrebuiltDB { + return "" // No wallets for prebuilt DB + } + return "/opt/oracle/oradata/dbconfig/${ORACLE_SID}/.wallet" + }(), }, { - Name: "PRIMARY_DB_CONN_STR", - Value: n.Name + ":1521/" + n.Spec.Sid, + Name: "ORACLE_PDB", + Value: m.Spec.Pdbname, }, { - Name: "PRIMARY_SID", - Value: strings.ToUpper(n.Spec.Sid), + Name: 
"ORACLE_CHARACTERSET", + Value: m.Spec.Charset, }, { - Name: "PRIMARY_NAME", - Value: n.Name, + Name: "ORACLE_EDITION", + Value: m.Spec.Edition, }, { - Name: "ORACLE_HOSTNAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIP", - }, - }, + Name: "INIT_SGA_SIZE", + Value: func() string { + if m.Spec.InitParams != nil && m.Spec.InitParams.SgaTarget > 0 && m.Spec.InitParams.PgaAggregateTarget > 0 { + return strconv.Itoa(m.Spec.InitParams.SgaTarget) + } + return "" + }(), }, { - Name: "CLONE_DB", - Value: "true", + Name: "INIT_PGA_SIZE", + Value: func() string { + if m.Spec.InitParams != nil && m.Spec.InitParams.SgaTarget > 0 && m.Spec.InitParams.PgaAggregateTarget > 0 { + return strconv.Itoa(m.Spec.InitParams.SgaTarget) + } + return "" + }(), }, { Name: "SKIP_DATAPATCH", Value: "true", }, } + + }(), + + Resources: func() corev1.ResourceRequirements { + var resourceReqRequests corev1.ResourceList = corev1.ResourceList{} + var resourceReqLimits corev1.ResourceList = corev1.ResourceList{} + + if m.Spec.Resources.Requests != nil && m.Spec.Resources.Requests.Cpu != "" { + resourceReqRequests["cpu"] = resource.MustParse(m.Spec.Resources.Requests.Cpu) + } + if m.Spec.Resources.Requests != nil && m.Spec.Resources.Requests.Memory != "" { + resourceReqRequests["memory"] = resource.MustParse(m.Spec.Resources.Requests.Memory) + } + + if m.Spec.Resources.Limits != nil && m.Spec.Resources.Limits.Cpu != "" { + resourceReqLimits["cpu"] = resource.MustParse(m.Spec.Resources.Limits.Cpu) + } + if m.Spec.Resources.Limits != nil && m.Spec.Resources.Limits.Memory != "" { + resourceReqLimits["memory"] = resource.MustParse(m.Spec.Resources.Limits.Memory) + } + + return corev1.ResourceRequirements{ + Requests: resourceReqRequests, + Limits: resourceReqLimits, + } }(), }}, @@ -669,17 +1205,15 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns SecurityContext: &corev1.PodSecurityContext{ RunAsUser: func() 
*int64 { - i := int64(0) - if m.Spec.Edition != "express" { - i = int64(dbcommons.ORACLE_UID) - } + i := int64(dbcommons.ORACLE_UID) return &i }(), RunAsGroup: func() *int64 { - i := int64(0) - if m.Spec.Edition != "express" { - i = int64(dbcommons.ORACLE_GUID) - } + i := int64(dbcommons.ORACLE_GUID) + return &i + }(), + FSGroup: func() *int64 { + i := int64(dbcommons.ORACLE_GUID) return &i }(), }, @@ -688,61 +1222,86 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns Name: m.Spec.Image.PullSecrets, }, }, + ServiceAccountName: m.Spec.ServiceAccountName, }, } + // Adding pod anti-affinity for standby cases + if m.Spec.CreateAs == "standby" { + weightedPodAffinityTerm := corev1.WeightedPodAffinityTerm{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{rp.Name}, + }}, + }, + TopologyKey: "kubernetes.io/hostname", + }, + } + if m.Spec.Persistence.AccessMode == "ReadWriteOnce" { + pod.Spec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + weightedPodAffinityTerm, + }, + } + } else { + pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = + append(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weightedPodAffinityTerm) + } + + } + // Set SingleInstanceDatabase instance as the owner and controller ctrl.SetControllerReference(m, pod, r.Scheme) return pod + } -//############################################################################# -// Instantiate Service spec from SingleInstanceDatabase spec -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) instantiateSVCSpec(m *dbapi.SingleInstanceDatabase) *corev1.Service { - svc := &corev1.Service{ - 
TypeMeta: metav1.TypeMeta{ - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: m.Name, - Namespace: m.Namespace, - Labels: map[string]string{ - "app": m.Name, - }, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: "listener", - Port: 1521, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "xmldb", - Port: 5500, - Protocol: corev1.ProtocolTCP, - }, - }, - Selector: map[string]string{ +// ############################################################################# +// +// Instantiate Service spec from SingleInstanceDatabase spec +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) instantiateSVCSpec(m *dbapi.SingleInstanceDatabase, + svcName string, ports []corev1.ServicePort, svcType corev1.ServiceType, publishNotReadyAddress bool) *corev1.Service { + svc := dbcommons.NewRealServiceBuilder(). + SetName(svcName). + SetNamespace(m.Namespace). + SetLabels(func() map[string]string { + return map[string]string{ "app": m.Name, - }, - Type: corev1.ServiceType(func() string { - if m.Spec.LoadBalancer { - return "LoadBalancer" + } + }()). + SetAnnotation(func() map[string]string { + annotations := make(map[string]string) + if len(m.Spec.ServiceAnnotations) != 0 { + for key, value := range m.Spec.ServiceAnnotations { + annotations[key] = value } - return "NodePort" - }()), - }, - } - // Set SingleInstanceDatabase instance as the owner and controller - ctrl.SetControllerReference(m, svc, r.Scheme) - return svc + } + return annotations + }()). + SetPorts(ports). + SetSelector(func() map[string]string { + return map[string]string{ + "app": m.Name, + } + }()). + SetPublishNotReadyAddresses(publishNotReadyAddress). + SetType(svcType). 
+ Build() + ctrl.SetControllerReference(m, &svc, r.Scheme) + return &svc } -//############################################################################# -// Instantiate Persistent Volume Claim spec from SingleInstanceDatabase spec -//############################################################################# +// ############################################################################# +// +// Instantiate Persistent Volume Claim spec from SingleInstanceDatabase spec +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) instantiatePVCSpec(m *dbapi.SingleInstanceDatabase) *corev1.PersistentVolumeClaim { pvc := &corev1.PersistentVolumeClaim{ @@ -755,6 +1314,15 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePVCSpec(m *dbapi.SingleIns Labels: map[string]string{ "app": m.Name, }, + Annotations: func() map[string]string { + if m.Spec.Persistence.VolumeClaimAnnotation != "" { + strParts := strings.Split(m.Spec.Persistence.VolumeClaimAnnotation, ":") + annotationMap := make(map[string]string) + annotationMap[strParts[0]] = strParts[1] + return annotationMap + } + return nil + }(), }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: func() []corev1.PersistentVolumeAccessMode { @@ -762,13 +1330,30 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePVCSpec(m *dbapi.SingleIns accessMode = append(accessMode, corev1.PersistentVolumeAccessMode(m.Spec.Persistence.AccessMode)) return accessMode }(), - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ // Requests describes the minimum amount of compute resources required "storage": resource.MustParse(m.Spec.Persistence.Size), }, }, StorageClassName: &m.Spec.Persistence.StorageClass, + VolumeName: m.Spec.Persistence.DatafilesVolumeName, + Selector: func() *metav1.LabelSelector { + if m.Spec.Persistence.StorageClass != "oci" { + return nil + } + 
return &metav1.LabelSelector{ + MatchLabels: func() map[string]string { + ns := make(map[string]string) + if len(m.Spec.NodeSelector) != 0 { + for key, value := range m.Spec.NodeSelector { + ns[key] = value + } + } + return ns + }(), + } + }(), }, } // Set SingleInstanceDatabase instance as the owner and controller @@ -776,23 +1361,31 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePVCSpec(m *dbapi.SingleIns return pvc } -//############################################################################# -// Stake a claim for Persistent Volume -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) createOrReplacePVC(ctx context.Context, req ctrl.Request, +// ############################################################################# +// +// Stake a claim for Persistent Volume for customScript Volume +// +// ############################################################################# + +func (r *SingleInstanceDatabaseReconciler) createOrReplacePVCforCustomScriptsVol(ctx context.Context, req ctrl.Request, m *dbapi.SingleInstanceDatabase) (ctrl.Result, error) { - log := r.Log.WithValues("createPVC", req.NamespacedName) + log := r.Log.WithValues("createPVC CustomScripts Vol", req.NamespacedName) + + // if customScriptsVolumeName is not present or it is same than DatafilesVolumeName + if m.Spec.Persistence.ScriptsVolumeName == "" || m.Spec.Persistence.ScriptsVolumeName == m.Spec.Persistence.DatafilesVolumeName { + return requeueN, nil + } pvcDeleted := false + pvcName := string(m.Name) + "-" + string(m.Spec.Persistence.ScriptsVolumeName) // Check if the PVC already exists using r.Get, if not create a new one using r.Create pvc := &corev1.PersistentVolumeClaim{} // Get retrieves an obj ( a struct pointer ) for the given object key from the Kubernetes Cluster. 
- err := r.Get(ctx, types.NamespacedName{Name: m.Name, Namespace: m.Namespace}, pvc) + err := r.Get(ctx, types.NamespacedName{Name: pvcName, Namespace: m.Namespace}, pvc) + if err == nil { - if *pvc.Spec.StorageClassName != m.Spec.Persistence.StorageClass || - pvc.Spec.Resources.Requests["storage"] != resource.MustParse(m.Spec.Persistence.Size) || - pvc.Spec.AccessModes[0] != corev1.PersistentVolumeAccessMode(m.Spec.Persistence.AccessMode) { + if m.Spec.Persistence.ScriptsVolumeName != "" && pvc.Spec.VolumeName != m.Spec.Persistence.ScriptsVolumeName { // call deletePods() with zero pods in avaiable and nil readyPod to delete all pods result, err := r.deletePods(ctx, req, m, []corev1.Pod{}, corev1.Pod{}, 0, 0) if result.Requeue { @@ -813,7 +1406,57 @@ func (r *SingleInstanceDatabaseReconciler) createOrReplacePVC(ctx context.Contex } if pvcDeleted || err != nil && apierrors.IsNotFound(err) { // Define a new PVC - pvc = r.instantiatePVCSpec(m) + + // get accessMode and storage of pv mentioned to be used in pvc spec + pv := &corev1.PersistentVolume{} + pvName := m.Spec.Persistence.ScriptsVolumeName + // Get retrieves an obj ( a struct pointer ) for the given object key from the Kubernetes Cluster. 
+ pvErr := r.Get(ctx, types.NamespacedName{Name: pvName, Namespace: m.Namespace}, pv) + if pvErr != nil { + log.Error(pvErr, "Failed to get PV") + return requeueY, pvErr + } + + volumeQty := pv.Spec.Capacity[corev1.ResourceStorage] + + AccessMode := pv.Spec.AccessModes[0] + Storage := int(volumeQty.Value()) + StorageClass := "" + + log.Info(fmt.Sprintf("PV storage: %v\n", Storage)) + log.Info(fmt.Sprintf("PV AccessMode: %v\n", AccessMode)) + + pvc := &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: m.Namespace, + Labels: map[string]string{ + "app": m.Name, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: func() []corev1.PersistentVolumeAccessMode { + var accessMode []corev1.PersistentVolumeAccessMode + accessMode = append(accessMode, corev1.PersistentVolumeAccessMode(AccessMode)) + return accessMode + }(), + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + // Requests describes the minimum amount of compute resources required + "storage": *resource.NewQuantity(int64(Storage), resource.BinarySI), + }, + }, + StorageClassName: &StorageClass, + VolumeName: pvName, + }, + } + + // Set SingleInstanceDatabase instance as the owner and controller + ctrl.SetControllerReference(m, pvc, r.Scheme) + log.Info("Creating a new PVC", "PVC.Namespace", pvc.Namespace, "PVC.Name", pvc.Name) err = r.Create(ctx, pvc) if err != nil { @@ -829,84 +1472,437 @@ func (r *SingleInstanceDatabaseReconciler) createOrReplacePVC(ctx context.Contex return requeueN, nil } -//############################################################################# -// Create a Service for SingleInstanceDatabase -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) createOrReplaceSVC(ctx context.Context, req ctrl.Request, +// 
############################################################################# +// +// Stake a claim for Persistent Volume for Datafiles Volume +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) createOrReplacePVCforDatafilesVol(ctx context.Context, req ctrl.Request, m *dbapi.SingleInstanceDatabase) (ctrl.Result, error) { - log := r.Log.WithValues("createOrReplaceSVC", req.NamespacedName) + log := r.Log.WithValues("createPVC Datafiles-Vol", req.NamespacedName) + + // Don't create PVC if persistence is not chosen + if m.Spec.Persistence.Size == "" { + return requeueN, nil + } - svcDeleted := false - // Check if the Service already exists, if not create a new one - svc := &corev1.Service{} + pvcDeleted := false + // Check if the PVC already exists using r.Get, if not create a new one using r.Create + pvc := &corev1.PersistentVolumeClaim{} // Get retrieves an obj ( a struct pointer ) for the given object key from the Kubernetes Cluster. - err := r.Get(ctx, types.NamespacedName{Name: m.Name, Namespace: m.Namespace}, svc) + err := r.Get(ctx, types.NamespacedName{Name: m.Name, Namespace: m.Namespace}, pvc) + if err == nil { - svcType := corev1.ServiceType("NodePort") - if m.Spec.LoadBalancer { - svcType = corev1.ServiceType("LoadBalancer") - } + if *pvc.Spec.StorageClassName != m.Spec.Persistence.StorageClass || + (m.Spec.Persistence.DatafilesVolumeName != "" && pvc.Spec.VolumeName != m.Spec.Persistence.DatafilesVolumeName) || + pvc.Spec.AccessModes[0] != corev1.PersistentVolumeAccessMode(m.Spec.Persistence.AccessMode) { + // PV change use cases which would trigger recreation of SIDB pods are :- + // 1. Change in storage class + // 2. Change in volume name + // 3. 
Change in volume access mode + + // deleting singleinstancedatabase resource + result, err := r.deletePods(ctx, req, m, []corev1.Pod{}, corev1.Pod{}, 0, 0) + if result.Requeue { + return result, err + } - if svc.Spec.Type != svcType { - log.Info("Deleting SVC", " name ", svc.Name) - err = r.Delete(ctx, svc) + // deleting persistent volume claim + log.Info("Deleting PVC", " name ", pvc.Name) + err = r.Delete(ctx, pvc) if err != nil { - r.Log.Error(err, "Failed to delete svc", " Name", svc.Name) + r.Log.Error(err, "Failed to delete Pvc", "Pvc.Name", pvc.Name) return requeueN, err } - svcDeleted = true + pvcDeleted = true + + } else if pvc.Spec.Resources.Requests["storage"] != resource.MustParse(m.Spec.Persistence.Size) { + // check the storage class of the pvc + // if the storage class doesn't support resize the throw an error event and try expanding via deleting and recreating the pv and pods + if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName == "" { + r.Recorder.Eventf(m, corev1.EventTypeWarning, "PVC not resizable", "Cannot resize pvc as storage class is either nil or default") + return requeueN, fmt.Errorf("cannot resize pvc as storage class is either nil or default") + } + + storageClassName := *pvc.Spec.StorageClassName + storageClass := &storagev1.StorageClass{} + err := r.Get(ctx, types.NamespacedName{Name: storageClassName}, storageClass) + if err != nil { + return requeueY, fmt.Errorf("error while fetching the storage class") + } + + if storageClass.AllowVolumeExpansion == nil || !*storageClass.AllowVolumeExpansion { + r.Recorder.Eventf(m, corev1.EventTypeWarning, "PVC not resizable", "The storage class doesn't support volume expansion") + return requeueN, fmt.Errorf("the storage class %s doesn't support volume expansion", storageClassName) + } + + newPVCSize := resource.MustParse(m.Spec.Persistence.Size) + newPVCSizeAdd := &newPVCSize + if newPVCSizeAdd.Cmp(pvc.Spec.Resources.Requests["storage"]) < 0 { + r.Recorder.Eventf(m, 
corev1.EventTypeWarning, "Cannot Resize PVC", "Forbidden: field can not be less than previous value") + return requeueN, fmt.Errorf("Resizing PVC to lower size volume not allowed") + } + + // Expanding the persistent volume claim + pvc.Spec.Resources.Requests["storage"] = resource.MustParse(m.Spec.Persistence.Size) + log.Info("Updating PVC", "pvc", pvc.Name, "volume", pvc.Spec.VolumeName) + r.Recorder.Eventf(m, corev1.EventTypeNormal, "Updating PVC - volume expansion", "Resizing the pvc for storage expansion") + err = r.Update(ctx, pvc) + if err != nil { + log.Error(err, "Error while updating the PVCs") + return requeueY, fmt.Errorf("error while updating the PVCs") + } + + } else { + + log.Info("Found Existing PVC", "Name", pvc.Name) + return requeueN, nil + } } - if svcDeleted || err != nil && apierrors.IsNotFound(err) { - // Define a new Service - svc = r.instantiateSVCSpec(m) - log.Info("Creating a new Service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) - err = r.Create(ctx, svc) + + if pvcDeleted || err != nil && apierrors.IsNotFound(err) { + // Define a new PVC + pvc = r.instantiatePVCSpec(m) + log.Info("Creating a new PVC", "PVC.Namespace", pvc.Namespace, "PVC.Name", pvc.Name) + err = r.Create(ctx, pvc) if err != nil { - log.Error(err, "Failed to create new Service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) + log.Error(err, "Failed to create new PVC", "PVC.Namespace", pvc.Namespace, "PVC.Name", pvc.Name) return requeueY, err } + return requeueN, nil } else if err != nil { - log.Error(err, "Failed to get Service") + log.Error(err, "Failed to get PVC") return requeueY, err } - log.Info("Found Existing Service ", "Service Name ", svc.Name) - m.Status.ConnectString = dbcommons.ValueUnavailable - m.Status.PdbConnectString = dbcommons.ValueUnavailable - m.Status.OemExpressUrl = dbcommons.ValueUnavailable - pdbName := "ORCLPDB1" - if m.Spec.Pdbname != "" { - pdbName = strings.ToUpper(m.Spec.Pdbname) - } - if 
m.Spec.LoadBalancer { - m.Status.ClusterConnectString = svc.Name + "." + svc.Namespace + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/" + strings.ToUpper(m.Spec.Sid) - if len(svc.Status.LoadBalancer.Ingress) > 0 { - m.Status.ConnectString = svc.Status.LoadBalancer.Ingress[0].IP + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/" + strings.ToUpper(m.Spec.Sid) - m.Status.PdbConnectString = svc.Status.LoadBalancer.Ingress[0].IP + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/" + strings.ToUpper(pdbName) - m.Status.OemExpressUrl = "https://" + svc.Status.LoadBalancer.Ingress[0].IP + ":" + fmt.Sprint(svc.Spec.Ports[1].Port) + "/em" - } - return requeueN, nil + return requeueN, nil +} + +// ############################################################################# +// +// Create Services for SingleInstanceDatabase +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) createOrReplaceSVC(ctx context.Context, req ctrl.Request, + m *dbapi.SingleInstanceDatabase) (ctrl.Result, error) { + + log := r.Log.WithValues("createOrReplaceSVC", req.NamespacedName) + + /** Two k8s services gets created: + 1. One service is ClusterIP service for cluster only communications on the listener port 1521, + 2. 
One service is NodePort/LoadBalancer (according to the YAML specs) for users to connect + **/ + + // clusterSvc is the cluster-wide service and extSvc is the external service for the users to connect + clusterSvc := &corev1.Service{} + extSvc := &corev1.Service{} + + clusterSvcName := m.Name + extSvcName := m.Name + "-ext" + + // svcPort is the intended port for extSvc taken from singleinstancedatabase YAML file for normal database connection + // If loadBalancer is true, it would be the listener port otherwise it would be node port + svcPort := func() int32 { + if m.Spec.ListenerPort != 0 { + return int32(m.Spec.ListenerPort) + } else { + return dbcommons.CONTAINER_LISTENER_PORT + } + }() + + // tcpsSvcPort is the intended port for extSvc taken from singleinstancedatabase YAML file for TCPS connection + // If loadBalancer is true, it would be the listener port otherwise it would be node port + tcpsSvcPort := func() int32 { + if m.Spec.TcpsListenerPort != 0 { + return int32(m.Spec.TcpsListenerPort) + } else { + return dbcommons.CONTAINER_TCPS_PORT + } + }() + + // Querying for the K8s service resources + getClusterSvcErr := r.Get(ctx, types.NamespacedName{Name: clusterSvcName, Namespace: m.Namespace}, clusterSvc) + getExtSvcErr := r.Get(ctx, types.NamespacedName{Name: extSvcName, Namespace: m.Namespace}, extSvc) + + if getClusterSvcErr != nil && apierrors.IsNotFound(getClusterSvcErr) { + // Create a new ClusterIP service + ports := []corev1.ServicePort{{Name: "listener", Port: dbcommons.CONTAINER_LISTENER_PORT, Protocol: corev1.ProtocolTCP}} + svc := r.instantiateSVCSpec(m, clusterSvcName, ports, corev1.ServiceType("ClusterIP"), true) + log.Info("Creating a new service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) + err := r.Create(ctx, svc) + if err != nil { + log.Error(err, "Failed to create new service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) + return requeueY, err + } + } else if getClusterSvcErr != nil { + // Error 
encountered in obtaining the clusterSvc service resource + log.Error(getClusterSvcErr, "Error encountered in obtaining the service", clusterSvcName) + return requeueY, getClusterSvcErr + } + + // extSvcType defines the type of the service (LoadBalancer/NodePort) for extSvc as specified in the singleinstancedatabase.yaml file + extSvcType := corev1.ServiceType("NodePort") + if m.Spec.LoadBalancer { + extSvcType = corev1.ServiceType("LoadBalancer") + } + + isExtSvcFound := true + + if getExtSvcErr != nil && apierrors.IsNotFound(getExtSvcErr) { + isExtSvcFound = false + } else if getExtSvcErr != nil { + // Error encountered in obtaining the extSvc service resource + log.Error(getExtSvcErr, "Error encountered in obtaining the service", extSvcName) + return requeueY, getExtSvcErr + } else { + // Counting required number of ports in extSvc + requiredPorts := 2 + if m.Spec.EnableTCPS && m.Spec.ListenerPort != 0 { + requiredPorts = 3 + } + + // Obtaining all ports of the extSvc k8s service + var targetPorts []int32 + for _, port := range extSvc.Spec.Ports { + if extSvc.Spec.Type == corev1.ServiceType("LoadBalancer") { + targetPorts = append(targetPorts, port.Port) + } else if extSvc.Spec.Type == corev1.ServiceType("NodePort") { + targetPorts = append(targetPorts, port.NodePort) + } + } + + patchSvc := false + + // Conditions to determine whether to patch or not + if extSvc.Spec.Type != extSvcType || len(extSvc.Spec.Ports) != requiredPorts { + patchSvc = true + } + + if (m.Spec.ListenerPort != 0 && svcPort != targetPorts[1]) || (m.Spec.EnableTCPS && m.Spec.TcpsListenerPort != 0 && tcpsSvcPort != targetPorts[len(targetPorts)-1]) { + patchSvc = true + } + + if m.Spec.LoadBalancer { + if m.Spec.EnableTCPS { + if m.Spec.TcpsListenerPort == 0 && tcpsSvcPort != targetPorts[len(targetPorts)-1] { + patchSvc = true + } + } else { + if m.Spec.ListenerPort == 0 && svcPort != targetPorts[1] { + patchSvc = true + } + } + } else { + if m.Spec.EnableTCPS { + if m.Spec.TcpsListenerPort == 
0 && tcpsSvcPort != extSvc.Spec.Ports[len(targetPorts)-1].TargetPort.IntVal { + patchSvc = true + } + } else { + if m.Spec.ListenerPort == 0 && svcPort != extSvc.Spec.Ports[1].TargetPort.IntVal { + patchSvc = true + } + } + } + + if patchSvc { + // Reset connect strings whenever patching happens + m.Status.Status = dbcommons.StatusUpdating + m.Status.ConnectString = dbcommons.ValueUnavailable + m.Status.PdbConnectString = dbcommons.ValueUnavailable + m.Status.OemExpressUrl = dbcommons.ValueUnavailable + m.Status.TcpsConnectString = dbcommons.ValueUnavailable + m.Status.TcpsPdbConnectString = dbcommons.ValueUnavailable + + // Payload formation for patching the service + var payload string + if m.Spec.LoadBalancer { + if m.Spec.EnableTCPS { + if m.Spec.ListenerPort != 0 { + payload = fmt.Sprintf(dbcommons.ThreePortPayload, extSvcType, fmt.Sprintf(dbcommons.LsnrPort, svcPort), fmt.Sprintf(dbcommons.TcpsPort, tcpsSvcPort)) + } else { + payload = fmt.Sprintf(dbcommons.TwoPortPayload, extSvcType, fmt.Sprintf(dbcommons.TcpsPort, tcpsSvcPort)) + } + } else { + payload = fmt.Sprintf(dbcommons.TwoPortPayload, extSvcType, fmt.Sprintf(dbcommons.LsnrPort, svcPort)) + } + } else { + if m.Spec.EnableTCPS { + if m.Spec.ListenerPort != 0 && m.Spec.TcpsListenerPort != 0 { + payload = fmt.Sprintf(dbcommons.ThreePortPayload, extSvcType, fmt.Sprintf(dbcommons.LsnrNodePort, svcPort), fmt.Sprintf(dbcommons.TcpsNodePort, tcpsSvcPort)) + } else if m.Spec.ListenerPort != 0 { + payload = fmt.Sprintf(dbcommons.ThreePortPayload, extSvcType, fmt.Sprintf(dbcommons.LsnrNodePort, svcPort), fmt.Sprintf(dbcommons.TcpsPort, tcpsSvcPort)) + } else if m.Spec.TcpsListenerPort != 0 { + payload = fmt.Sprintf(dbcommons.TwoPortPayload, extSvcType, fmt.Sprintf(dbcommons.TcpsNodePort, tcpsSvcPort)) + } else { + payload = fmt.Sprintf(dbcommons.TwoPortPayload, extSvcType, fmt.Sprintf(dbcommons.TcpsPort, tcpsSvcPort)) + } + } else { + if m.Spec.ListenerPort != 0 { + payload = 
fmt.Sprintf(dbcommons.TwoPortPayload, extSvcType, fmt.Sprintf(dbcommons.LsnrNodePort, svcPort)) + } else { + payload = fmt.Sprintf(dbcommons.TwoPortPayload, extSvcType, fmt.Sprintf(dbcommons.LsnrPort, svcPort)) + } + } + } + + //Attempt Service Patching + log.Info("Patching the service", "Service.Name", extSvc.Name, "payload", payload) + err := dbcommons.PatchService(r.Config, m.Namespace, ctx, req, extSvcName, payload) + if err != nil { + log.Error(err, "Failed to patch Service") + } + //Requeue once after patching + return requeueY, err + } + } + + if !isExtSvcFound { + // Reset connect strings whenever extSvc is recreated + m.Status.Status = dbcommons.StatusUpdating + m.Status.ConnectString = dbcommons.ValueUnavailable + m.Status.PdbConnectString = dbcommons.ValueUnavailable + m.Status.OemExpressUrl = dbcommons.ValueUnavailable + m.Status.TcpsConnectString = dbcommons.ValueUnavailable + m.Status.TcpsPdbConnectString = dbcommons.ValueUnavailable + + // New service has to be created + ports := []corev1.ServicePort{ + { + Name: "xmldb", + Port: 5500, + Protocol: corev1.ProtocolTCP, + }, + } + + if m.Spec.LoadBalancer { + if m.Spec.EnableTCPS { + if m.Spec.ListenerPort != 0 { + ports = append(ports, corev1.ServicePort{ + Name: "listener", + Protocol: corev1.ProtocolTCP, + Port: svcPort, + TargetPort: intstr.FromInt(int(dbcommons.CONTAINER_LISTENER_PORT)), + }) + } + ports = append(ports, corev1.ServicePort{ + Name: "listener-tcps", + Protocol: corev1.ProtocolTCP, + Port: tcpsSvcPort, + TargetPort: intstr.FromInt(int(dbcommons.CONTAINER_TCPS_PORT)), + }) + } else { + ports = append(ports, corev1.ServicePort{ + Name: "listener", + Protocol: corev1.ProtocolTCP, + Port: svcPort, + TargetPort: intstr.FromInt(int(dbcommons.CONTAINER_LISTENER_PORT)), + }) + } + } else { + if m.Spec.EnableTCPS { + if m.Spec.ListenerPort != 0 { + ports = append(ports, corev1.ServicePort{ + Name: "listener", + Protocol: corev1.ProtocolTCP, + Port: dbcommons.CONTAINER_LISTENER_PORT, + NodePort: 
svcPort, + }) + } + ports = append(ports, corev1.ServicePort{ + Name: "listener-tcps", + Protocol: corev1.ProtocolTCP, + Port: dbcommons.CONTAINER_TCPS_PORT, + }) + if m.Spec.TcpsListenerPort != 0 { + ports[len(ports)-1].NodePort = tcpsSvcPort + } + } else { + ports = append(ports, corev1.ServicePort{ + Name: "listener", + Protocol: corev1.ProtocolTCP, + Port: dbcommons.CONTAINER_LISTENER_PORT, + }) + if m.Spec.ListenerPort != 0 { + ports[len(ports)-1].NodePort = svcPort + } + } + } + + // Create the service + svc := r.instantiateSVCSpec(m, extSvcName, ports, extSvcType, false) + log.Info("Creating a new service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) + err := r.Create(ctx, svc) + if err != nil { + log.Error(err, "Failed to create new service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) + return requeueY, err + } + extSvc = svc } - m.Status.ClusterConnectString = svc.Name + "." + svc.Namespace + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/" + strings.ToUpper(m.Spec.Sid) - nodeip := dbcommons.GetNodeIp(r, ctx, req) - if nodeip != "" { - m.Status.ConnectString = nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/" + strings.ToUpper(m.Spec.Sid) - m.Status.PdbConnectString = nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/" + strings.ToUpper(pdbName) - m.Status.OemExpressUrl = "https://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[1].NodePort) + "/em" + var sid, pdbName string + var getSidPdbEditionErr error + if m.Spec.Image.PrebuiltDB { + r.Log.Info("Initiliazing database sid, pdb, edition for prebuilt database") + var edition string + sid, pdbName, edition, getSidPdbEditionErr = dbcommons.GetSidPdbEdition(r, r.Config, ctx, ctrl.Request{NamespacedName: types.NamespacedName{Namespace: m.Namespace, Name: m.Name}}) + if errors.Is(getSidPdbEditionErr, dbcommons.ErrNoReadyPod) { + return requeueN, nil + } + if getSidPdbEditionErr != nil { + return requeueY, getSidPdbEditionErr + } + 
r.Log.Info(fmt.Sprintf("Prebuilt database: %s has SID : %s, PDB : %s, EDITION: %s", m.Name, sid, pdbName, edition)) + m.Status.Edition = cases.Title(language.English).String(edition) + } + if sid == "" { + sid = strings.ToUpper(m.Spec.Sid) + } + if pdbName == "" { + pdbName = strings.ToUpper(m.Spec.Pdbname) + } + if m.Spec.LoadBalancer { + m.Status.ClusterConnectString = extSvc.Name + "." + extSvc.Namespace + ":" + fmt.Sprint(extSvc.Spec.Ports[1].Port) + "/" + strings.ToUpper(sid) + if len(extSvc.Status.LoadBalancer.Ingress) > 0 { + // 'lbAddress' will contain the Fully Qualified Hostname of the LB. If the hostname is not available it will contain the IP address of the LB + lbAddress := extSvc.Status.LoadBalancer.Ingress[0].Hostname + if lbAddress == "" { + lbAddress = extSvc.Status.LoadBalancer.Ingress[0].IP + } + m.Status.ConnectString = lbAddress + ":" + fmt.Sprint(extSvc.Spec.Ports[1].Port) + "/" + strings.ToUpper(sid) + m.Status.PdbConnectString = lbAddress + ":" + fmt.Sprint(extSvc.Spec.Ports[1].Port) + "/" + strings.ToUpper(pdbName) + oemExpressUrl = "https://" + lbAddress + ":" + fmt.Sprint(extSvc.Spec.Ports[0].Port) + "/em" + if m.Spec.EnableTCPS { + m.Status.TcpsConnectString = lbAddress + ":" + fmt.Sprint(extSvc.Spec.Ports[len(extSvc.Spec.Ports)-1].Port) + "/" + strings.ToUpper(sid) + m.Status.TcpsPdbConnectString = lbAddress + ":" + fmt.Sprint(extSvc.Spec.Ports[len(extSvc.Spec.Ports)-1].Port) + "/" + strings.ToUpper(pdbName) + } + } + } else { + m.Status.ClusterConnectString = extSvc.Name + "." 
+ extSvc.Namespace + ":" + fmt.Sprint(extSvc.Spec.Ports[1].Port) + "/" + strings.ToUpper(sid) + nodeip := dbcommons.GetNodeIp(r, ctx, req) + if nodeip != "" { + m.Status.ConnectString = nodeip + ":" + fmt.Sprint(extSvc.Spec.Ports[1].NodePort) + "/" + strings.ToUpper(sid) + m.Status.PdbConnectString = nodeip + ":" + fmt.Sprint(extSvc.Spec.Ports[1].NodePort) + "/" + strings.ToUpper(pdbName) + oemExpressUrl = "https://" + nodeip + ":" + fmt.Sprint(extSvc.Spec.Ports[0].NodePort) + "/em" + if m.Spec.EnableTCPS { + m.Status.TcpsConnectString = nodeip + ":" + fmt.Sprint(extSvc.Spec.Ports[len(extSvc.Spec.Ports)-1].NodePort) + "/" + strings.ToUpper(sid) + m.Status.TcpsPdbConnectString = nodeip + ":" + fmt.Sprint(extSvc.Spec.Ports[len(extSvc.Spec.Ports)-1].NodePort) + "/" + strings.ToUpper(pdbName) + } + } } return requeueN, nil } -//############################################################################# -// Create new Pods or delete old/extra pods -// m = SingleInstanceDatabase -// n = CloneFromDatabase -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) createOrReplacePods(m *dbapi.SingleInstanceDatabase, n *dbapi.SingleInstanceDatabase, +// ############################################################################# +// +// Create new Pods or delete old/extra pods +// m = SingleInstanceDatabase +// n = CloneFromDatabase +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) createOrReplacePods(m *dbapi.SingleInstanceDatabase, n *dbapi.SingleInstanceDatabase, rp *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("createOrReplacePods", req.NamespacedName) @@ -914,20 +1910,26 @@ func (r *SingleInstanceDatabaseReconciler) createOrReplacePods(m *dbapi.SingleIn oldImage := "" // call FindPods() to fetch pods all version/images of the same SIDB kind - readyPod, 
replicasFound, available, podsMarkedToBeDeleted, err := dbcommons.FindPods(r, "", "", m.Name, m.Namespace, ctx, req) + readyPod, replicasFound, allAvailable, podsMarkedToBeDeleted, err := dbcommons.FindPods(r, "", "", m.Name, m.Namespace, ctx, req) if err != nil { log.Error(err, err.Error()) return requeueY, err } - if m.Spec.Edition == "express" && podsMarkedToBeDeleted > 0 { - // Recreate new pods only after earlier pods are terminated completely - return requeueY, err + + // Recreate new pods only after earlier pods are terminated completely + for i := 0; i < len(podsMarkedToBeDeleted); i++ { + r.Log.Info("Force deleting pod ", "name", podsMarkedToBeDeleted[i].Name, "phase", podsMarkedToBeDeleted[i].Status.Phase) + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + r.Delete(ctx, &podsMarkedToBeDeleted[i], &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) } + if readyPod.Name != "" { - available = append(available, readyPod) + allAvailable = append(allAvailable, readyPod) } - for _, pod := range available { + for _, pod := range allAvailable { if pod.Labels["version"] != m.Spec.Image.Version { oldVersion = pod.Labels["version"] } @@ -943,76 +1945,160 @@ func (r *SingleInstanceDatabaseReconciler) createOrReplacePods(m *dbapi.SingleIn if !imageChanged { eventReason := "" eventMsg := "" - if replicasFound == m.Spec.Replicas { - return requeueN, nil + if replicasFound > m.Spec.Replicas { + eventReason = "Scaling in pods" + eventMsg = "from " + strconv.Itoa(replicasFound) + " to " + strconv.Itoa(m.Spec.Replicas) + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + // Delete extra PODs + return r.deletePods(ctx, req, m, allAvailable, readyPod, replicasFound, m.Spec.Replicas) } - if replicasFound < m.Spec.Replicas { - if replicasFound != 0 { - eventReason = "Scaling Out" - eventMsg = "from " + strconv.Itoa(replicasFound) + " pods to " + strconv.Itoa(m.Spec.Replicas) + if 
replicasFound != 0 { + if replicasFound == 1 { + if m.Status.DatafilesCreated != "true" { + log.Info("No datafiles created, single replica found, creating wallet") + // Creation of Oracle Wallet for Single Instance Database credentials + r.createWallet(m, ctx, req) + } + } + if ok, _ := dbcommons.IsAnyPodWithStatus(allAvailable, corev1.PodRunning); !ok { + eventReason = "Database Pending" + eventMsg = "waiting for a pod to get to running state" + log.Info(eventMsg) r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + for i := 0; i < len(allAvailable); i++ { + r.Log.Info("Pod status: ", "name", allAvailable[i].Name, "phase", allAvailable[i].Status.Phase) + waitingReason := "" + var stateWaiting *corev1.ContainerStateWaiting + if len(allAvailable[i].Status.InitContainerStatuses) > 0 { + stateWaiting = allAvailable[i].Status.InitContainerStatuses[0].State.Waiting + } else if len(allAvailable[i].Status.ContainerStatuses) > 0 { + stateWaiting = allAvailable[i].Status.ContainerStatuses[0].State.Waiting + } + if stateWaiting != nil { + waitingReason = stateWaiting.Reason + } + if waitingReason == "" { + continue + } + r.Log.Info("Pod unavailable reason: ", "reason", waitingReason) + if strings.Contains(waitingReason, "ImagePullBackOff") || strings.Contains(waitingReason, "ErrImagePull") { + r.Log.Info("Deleting pod", "name", allAvailable[i].Name) + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + r.Delete(ctx, &allAvailable[i], &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + } + } + return requeueY, err } - // If version is same , call createPods() with the same version , and no of Replicas required - return r.createPods(m, n, ctx, req, replicasFound) } - eventReason = "Scaling In" - eventMsg = "from " + strconv.Itoa(replicasFound) + " pods to " + strconv.Itoa(m.Spec.Replicas) - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - // Delete extra PODs - 
return r.deletePods(ctx, req, m, available, readyPod, replicasFound, m.Spec.Replicas) + if replicasFound == m.Spec.Replicas { + return requeueN, nil + } + if replicasFound != 0 && replicasFound < m.Spec.Replicas { + eventReason = "Scaling out pods" + eventMsg = "from " + strconv.Itoa(replicasFound) + " to " + strconv.Itoa(m.Spec.Replicas) + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + } + // If version is same , call createPods() with the same version , and no of Replicas required + return r.createPods(m, n, rp, ctx, req, replicasFound, false) } // Version/Image changed // PATCHING START (Only Software Patch) - // call FindPods() to find pods of newer version . if running , delete the older version replicas. - readyPod, replicasFound, available, _, err = dbcommons.FindPods(r, m.Spec.Image.Version, - m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + log.Info("Pod image change detected, datapatch to be rerun...") + m.Status.DatafilesPatched = "false" + // call FindPods() to find pods of older version. Delete all the Pods + readyPod, oldReplicasFound, oldAvailable, _, err := dbcommons.FindPods(r, oldVersion, + oldImage, m.Name, m.Namespace, ctx, req) if err != nil { log.Error(err, err.Error()) - return requeueY, nil + return requeueY, err + } + if readyPod.Name != "" { + log.Info("Ready pod marked for deletion", "name", readyPod.Name) + oldAvailable = append(oldAvailable, readyPod) } - // create new Pods with the new Version and no.of Replicas required - result, err := r.createPods(m, n, ctx, req, replicasFound) - if result.Requeue { - return result, err + if m.Status.Replicas == 1 { + r.deletePods(ctx, req, m, oldAvailable, corev1.Pod{}, oldReplicasFound, 0) } + // call FindPods() to find pods of newer version . if running , delete the older version replicas. 
+ readyPod, newReplicasFound, newAvailable, _, err := dbcommons.FindPods(r, m.Spec.Image.Version, + m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return requeueY, nil + } // Findpods() only returns non ready pods if readyPod.Name != "" { log.Info("New ready pod found", "name", readyPod.Name) - available = append(available, readyPod) + newAvailable = append(newAvailable, readyPod) } - if ok, _ := dbcommons.IsAnyPodWithStatus(available, corev1.PodRunning); !ok { - eventReason := "Database Pending" - eventMsg := "waiting for newer version/image DB pods get to running state" - r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - log.Info(eventMsg) - return requeueY, errors.New(eventMsg) + + if newReplicasFound != 0 { + if ok, _ := dbcommons.IsAnyPodWithStatus(newAvailable, corev1.PodRunning); !ok { + eventReason := "Database Pending" + eventMsg := "waiting for pod with changed image to get to running state" + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + log.Info(eventMsg) + + for i := 0; i < len(newAvailable); i++ { + r.Log.Info("Pod status: ", "name", newAvailable[i].Name, "phase", newAvailable[i].Status.Phase) + waitingReason := "" + var stateWaiting *corev1.ContainerStateWaiting + if len(newAvailable[i].Status.InitContainerStatuses) > 0 { + stateWaiting = newAvailable[i].Status.InitContainerStatuses[0].State.Waiting + } else if len(newAvailable[i].Status.ContainerStatuses) > 0 { + stateWaiting = newAvailable[i].Status.ContainerStatuses[0].State.Waiting + } + if stateWaiting != nil { + waitingReason = stateWaiting.Reason + } + if waitingReason == "" { + continue + } + r.Log.Info("Pod unavailable reason: ", "reason", waitingReason) + if strings.Contains(waitingReason, "ImagePullBackOff") || strings.Contains(waitingReason, "ErrImagePull") { + r.Log.Info("Deleting pod", "name", newAvailable[i].Name) + var gracePeriodSeconds int64 = 0 + policy := 
metav1.DeletePropagationForeground + r.Delete(ctx, &newAvailable[i], &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + } + } + return requeueY, errors.New(eventMsg) + } } - // call FindPods() to find pods of older version . delete all the Pods - readyPod, replicasFound, available, _, err = dbcommons.FindPods(r, oldVersion, - oldImage, m.Name, m.Namespace, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY, err + // create new Pods with the new Version and no.of Replicas required + // if m.Status.Replicas > 1, then it is replica based patching + result, err := r.createPods(m, n, rp, ctx, req, newReplicasFound, m.Status.Replicas > 1) + if result.Requeue { + return result, err } - if readyPod.Name != "" { - log.Info("Ready pod marked for deletion", "name", readyPod.Name) - available = append(available, readyPod) + if m.Status.Replicas == 1 { + return requeueN, nil } - return r.deletePods(ctx, req, m, available, corev1.Pod{}, replicasFound, 0) + return r.deletePods(ctx, req, m, oldAvailable, corev1.Pod{}, oldReplicasFound, 0) // PATCHING END } -//############################################################################# -// Function for creating Oracle Wallet -//############################################################################# +// ############################################################################# +// +// Function for creating Oracle Wallet +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) createWallet(m *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // Wallet not supported for XE Database - if m.Spec.Edition == "express" { + // Wallet not supported for Express/Free Database + if m.Spec.Edition == "express" || m.Spec.Edition == "free" { + return requeueN, nil + } + + // No Wallet for Pre-built db + if m.Spec.Image.PrebuiltDB { return requeueN, nil } 
@@ -1033,12 +2119,12 @@ func (r *SingleInstanceDatabaseReconciler) createWallet(m *dbapi.SingleInstanceD return requeueY, nil } - // Iterate through the avaialableFinal (list of pods) to find out the pod whose status is updated about the init containers + // Iterate through the availableFinal (list of pods) to find out the pod whose status is updated about the init containers // If no required pod found then requeue the reconcile request var pod corev1.Pod var podFound bool for _, pod = range availableFinal { - // Check if pod status contianer is updated about init containers + // Check if pod status container is updated about init containers if len(pod.Status.InitContainerStatuses) > 0 { podFound = true break @@ -1063,7 +2149,7 @@ func (r *SingleInstanceDatabaseReconciler) createWallet(m *dbapi.SingleInstanceD return requeueY, nil } - if m.Spec.CloneFrom == "" && m.Spec.Edition != "express" { + if m.Spec.CreateAs != "clone" && m.Spec.Edition != "express" { //Check if Edition of m.Spec.Sid is same as m.Spec.Edition getEditionFile := dbcommons.GetEnterpriseEditionFileCMD eventReason := m.Spec.Sid + " is a enterprise edition" @@ -1077,9 +2163,9 @@ func (r *SingleInstanceDatabaseReconciler) createWallet(m *dbapi.SingleInstanceD if err == nil && out != "" { m.Status.Status = dbcommons.StatusError - eventMsg := "wrong edition" + eventMsg := "incorrect database edition" r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - return requeueN, errors.New("wrong Edition") + return requeueY, errors.New(eventMsg) } } @@ -1117,13 +2203,16 @@ func (r *SingleInstanceDatabaseReconciler) createWallet(m *dbapi.SingleInstanceD return requeueN, nil } -//############################################################################# -// Create the requested POD replicas -// m = SingleInstanceDatabase -// n = CloneFromDatabase -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) createPods(m 
*dbapi.SingleInstanceDatabase, n *dbapi.SingleInstanceDatabase, - ctx context.Context, req ctrl.Request, replicasFound int) (ctrl.Result, error) { +// ############################################################################## +// +// Create the requested POD replicas +// m = SingleInstanceDatabase +// n = CloneFromDatabase +// patching = Boolean variable to differentiate normal usecase with patching +// +// ############################################################################## +func (r *SingleInstanceDatabaseReconciler) createPods(m *dbapi.SingleInstanceDatabase, n *dbapi.SingleInstanceDatabase, rp *dbapi.SingleInstanceDatabase, + ctx context.Context, req ctrl.Request, replicasFound int, replicaPatching bool) (ctrl.Result, error) { log := r.Log.WithValues("createPods", req.NamespacedName) @@ -1133,25 +2222,29 @@ func (r *SingleInstanceDatabaseReconciler) createPods(m *dbapi.SingleInstanceDat log.Info("No of " + m.Name + " replicas found are same as required") return requeueN, nil } + firstPod := false if replicasFound == 0 { m.Status.Status = dbcommons.StatusPending - m.Status.DatafilesCreated = "false" - m.Status.DatafilesPatched = "false" - m.Status.Role = dbcommons.ValueUnavailable - m.Status.ConnectString = dbcommons.ValueUnavailable - m.Status.PdbConnectString = dbcommons.ValueUnavailable - m.Status.OemExpressUrl = dbcommons.ValueUnavailable - m.Status.ReleaseUpdate = dbcommons.ValueUnavailable + firstPod = true + } + if !replicaPatching { + m.Status.Replicas = replicasFound } - // if Found < Required , Create New Pods , Name of Pods are generated Randomly + // if Found < Required, create new pods, name of pods are generated randomly for i := replicasFound; i < replicasReq; i++ { - pod := r.instantiatePodSpec(m, n) + // mandatory pod affinity if it is replica based patching or not the first pod + pod := r.instantiatePodSpec(m, n, rp, replicaPatching || !firstPod) log.Info("Creating a new "+m.Name+" POD", "POD.Namespace", pod.Namespace, "POD.Name", 
pod.Name) err := r.Create(ctx, pod) if err != nil { log.Error(err, "Failed to create new "+m.Name+" POD", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) return requeueY, err } + m.Status.Replicas += 1 + if firstPod { + log.Info("Requeue for first pod to get to running state", "POD.Namespace", pod.Namespace, "POD.Name", pod.Name) + return requeueY, err + } } readyPod, _, availableFinal, _, err := dbcommons.FindPods(r, m.Spec.Image.Version, @@ -1164,8 +2257,6 @@ func (r *SingleInstanceDatabaseReconciler) createPods(m *dbapi.SingleInstanceDat availableFinal = append(availableFinal, readyPod) } - m.Status.Replicas = m.Spec.Replicas - podNamesFinal := dbcommons.GetPodNames(availableFinal) log.Info("Final "+m.Name+" Pods After Deleting (or) Adding Extra Pods ( Including The Ready Pod ) ", "Pod Names", podNamesFinal) log.Info(m.Name+" Replicas Available", "Count", len(podNamesFinal)) @@ -1174,11 +2265,13 @@ func (r *SingleInstanceDatabaseReconciler) createPods(m *dbapi.SingleInstanceDat return requeueN, nil } -//############################################################################# -// Create the requested POD replicas -// m = SingleInstanceDatabase -// n = CloneFromDatabase -//############################################################################# +// ############################################################################# +// +// Create the requested POD replicas +// m = SingleInstanceDatabase +// n = CloneFromDatabase +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) deletePods(ctx context.Context, req ctrl.Request, m *dbapi.SingleInstanceDatabase, available []corev1.Pod, readyPod corev1.Pod, replicasFound int, replicasRequired int) (ctrl.Result, error) { log := r.Log.WithValues("deletePods", req.NamespacedName) @@ -1199,7 +2292,7 @@ func (r *SingleInstanceDatabaseReconciler) deletePods(ctx context.Context, req c } } - // For deleting all pods , call with readyPod as 
nil ( corev1.Pod{} ) and append readyPod to avaiable while calling deletePods() + // For deleting all pods , call with readyPod as nil ( corev1.Pod{} ) and append readyPod to available while calling deletePods() // if Found > Required , Delete Extra Pods if replicasFound > len(available) { // if available does not contain readyPOD, add it @@ -1208,132 +2301,151 @@ func (r *SingleInstanceDatabaseReconciler) deletePods(ctx context.Context, req c noDeleted := 0 for _, availablePod := range available { - if readyPod.Name == availablePod.Name { + if readyPod.Name == availablePod.Name && m.Spec.Replicas != 0 { continue } if replicasRequired == (len(available) - noDeleted) { break } r.Log.Info("Deleting Pod : ", "POD.NAME", availablePod.Name) - err := r.Delete(ctx, &availablePod, &client.DeleteOptions{}) + var delOpts *client.DeleteOptions = &client.DeleteOptions{} + if replicasRequired == 0 { + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + delOpts.GracePeriodSeconds = &gracePeriodSeconds + delOpts.PropagationPolicy = &policy + } + err := r.Delete(ctx, &availablePod, delOpts) noDeleted += 1 if err != nil { r.Log.Error(err, "Failed to delete existing POD", "POD.Name", availablePod.Name) // Don't requeue + } else { + m.Status.Replicas -= 1 } } - m.Status.Replicas = m.Spec.Replicas - return requeueN, nil } -//############################################################################# -// ValidateDBReadiness and return the ready POD -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) validateDBReadiness(m *dbapi.SingleInstanceDatabase, +// ############################################################################# +// +// ValidateDBReadiness and return the ready POD +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) validateDBReadiness(sidb *dbapi.SingleInstanceDatabase, ctx 
context.Context, req ctrl.Request) (ctrl.Result, corev1.Pod, error) { - readyPod, _, available, _, err := dbcommons.FindPods(r, m.Spec.Image.Version, - m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + log := r.Log.WithValues("validateDBReadiness", req.NamespacedName) + + log.Info("Validating readiness for database") + + sidbReadyPod, _, available, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) if err != nil { r.Log.Error(err, err.Error()) - return requeueY, readyPod, err + return requeueY, sidbReadyPod, err } - if readyPod.Name == "" { - eventReason := "Database Pending" - eventMsg := "waiting for database pod to be ready" - m.Status.Status = dbcommons.StatusPending + + if sidbReadyPod.Name == "" { + sidb.Status.Status = dbcommons.StatusPending + log.Info("no pod currently in ready state") if ok, _ := dbcommons.IsAnyPodWithStatus(available, corev1.PodFailed); ok { - eventReason = "Database Failed" - eventMsg = "pod creation failed" - } else if ok, runningPod := dbcommons.IsAnyPodWithStatus(available, corev1.PodRunning); ok { - eventReason = "Database Creating" - eventMsg = "waiting for database to be ready" - m.Status.Status = dbcommons.StatusCreating - if m.Spec.Edition == "express" { - eventReason = "Database Unhealthy" - m.Status.Status = dbcommons.StatusNotReady - } - out, err := dbcommons.ExecCommand(r, r.Config, runningPod.Name, runningPod.Namespace, "", + eventReason := "Database Failed" + eventMsg := "pod creation failed" + r.Recorder.Eventf(sidb, corev1.EventTypeNormal, eventReason, eventMsg) + } else if ok, _ := dbcommons.IsAnyPodWithStatus(available, corev1.PodRunning); ok { + + out, err := dbcommons.ExecCommand(r, r.Config, available[0].Name, sidb.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.GetCheckpointFileCMD) if err != nil { - r.Log.Error(err, err.Error()) - return requeueY, readyPod, err + r.Log.Info(err.Error()) } - r.Log.Info("GetCheckpointFileCMD Output 
: \n" + out) if out != "" { - eventReason = "Database Unhealthy" - eventMsg = "datafiles exists" - m.Status.DatafilesCreated = "true" - m.Status.Status = dbcommons.StatusNotReady + log.Info("Database initialzied") + eventReason := "Database Unhealthy" + eventMsg := "datafiles exists" + r.Recorder.Eventf(sidb, corev1.EventTypeNormal, eventReason, eventMsg) + sidb.Status.DatafilesCreated = "true" + sidb.Status.Status = dbcommons.StatusNotReady + r.updateORDSStatus(sidb, ctx, req) + } else { + log.Info("Database Creating....", "Name", sidb.Name) + sidb.Status.Status = dbcommons.StatusCreating } + } else { + log.Info("Database Pending....", "Name", sidb.Name) } - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - r.Log.Info(eventMsg) - // As No pod is ready now , turn on mode when pod is ready . so requeue the request - return requeueY, readyPod, errors.New(eventMsg) + log.Info("no pod currently in ready state") + return requeueY, sidbReadyPod, nil } - if m.Status.DatafilesPatched != "true" { - eventReason := "Datapatch Pending" - eventMsg := "datapatch execution pending" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - } - available = append(available, readyPod) - podNamesFinal := dbcommons.GetPodNames(available) - r.Log.Info("Final "+m.Name+" Pods After Deleting (or) Adding Extra Pods ( Including The Ready Pod ) ", "Pod Names", podNamesFinal) - r.Log.Info(m.Name+" Replicas Available", "Count", len(podNamesFinal)) - r.Log.Info(m.Name+" Replicas Required", "Count", m.Spec.Replicas) - - eventReason := "Database Ready" - eventMsg := "database open on pod " + readyPod.Name + " scheduled on node " + readyPod.Status.HostIP - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - m.Status.DatafilesCreated = "true" - - // DB is ready, fetch and update other info - out, err := dbcommons.GetDatabaseRole(readyPod, r, r.Config, ctx, req, m.Spec.Edition) - if err == nil { - m.Status.Role = strings.ToUpper(out) - } - version, 
out, err := dbcommons.GetDatabaseVersion(readyPod, r, r.Config, ctx, req, m.Spec.Edition) - if err == nil { - if !strings.Contains(out, "ORA-") && m.Status.DatafilesPatched != "true" { - m.Status.ReleaseUpdate = version + if sidb.Spec.CreateAs == "clone" { + // Required since clone creates the datafiles under primary database SID folder + r.Log.Info("Creating the SID directory link for clone database", "name", sidb.Spec.Sid) + _, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", dbcommons.CreateSIDlinkCMD) + if err != nil { + r.Log.Info(err.Error()) } } - if m.Spec.Edition == "express" { - //Configure OEM Express Listener - out, err = dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, - "bash", "-c", fmt.Sprintf("echo -e \"%s\" | su -p oracle -c \"sqlplus -s / as sysdba\" ", dbcommons.ConfigureOEMSQL)) - if err != nil { - r.Log.Error(err, err.Error()) - return requeueY, readyPod, err - } - r.Log.Info("ConfigureOEMSQL output") - r.Log.Info(out) + version, err := dbcommons.GetDatabaseVersion(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return requeueY, sidbReadyPod, err + } + dbMajorVersion, err := strconv.Atoi(strings.Split(version, ".")[0]) + if err != nil { + r.Log.Error(err, err.Error()) + return requeueY, sidbReadyPod, err + } + r.Log.Info("DB Major Version is " + strconv.Itoa(dbMajorVersion)) + // Validating that free edition of the database is only supported from database 23c onwards + if sidb.Spec.Edition == "free" && dbMajorVersion < 23 { + errMsg := "the Oracle Database Free is only available from version 23c onwards" + r.Recorder.Eventf(sidb, corev1.EventTypeWarning, "Spec Error", errMsg) + sidb.Status.Status = dbcommons.StatusError + return requeueY, sidbReadyPod, errors.New(errMsg) } - return requeueN, readyPod, nil + available = append(available, sidbReadyPod) + podNamesFinal := dbcommons.GetPodNames(available) + r.Log.Info("Final 
"+sidb.Name+" Pods After Deleting (or) Adding Extra Pods ( Including The Ready Pod ) ", "Pod Names", podNamesFinal) + r.Log.Info(sidb.Name+" Replicas Available", "Count", len(podNamesFinal)) + r.Log.Info(sidb.Name+" Replicas Required", "Count", sidb.Spec.Replicas) + + eventReason := "Database Ready" + eventMsg := "database open on pod " + sidbReadyPod.Name + " scheduled on node " + sidbReadyPod.Status.HostIP + r.Recorder.Eventf(sidb, corev1.EventTypeNormal, eventReason, eventMsg) + + sidb.Status.CreatedAs = sidb.Spec.CreateAs + + return requeueN, sidbReadyPod, nil } -//############################################################################# -// Function for deleting the Oracle Wallet -//############################################################################# +// ############################################################################# +// +// Function for deleting the Oracle Wallet +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) deleteWallet(m *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // Wallet not supported for XE Database - if m.Spec.Edition == "express" { + // Wallet not supported for Express/Free Database + if m.Spec.Edition == "express" || m.Spec.Edition == "free" { + return requeueN, nil + } + + // No Wallet for Pre-built db + if m.Spec.Image.PrebuiltDB { return requeueN, nil } // Deleting the secret and then deleting the wallet // If the secret is not found it means that the secret and wallet both are deleted, hence no need to requeue - if !m.Spec.AdminPassword.KeepSecret { + if m.Spec.AdminPassword.KeepSecret != nil && !*m.Spec.AdminPassword.KeepSecret { r.Log.Info("Querying the database secret ...") secret := &corev1.Secret{} err := r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, secret) @@ -1364,24 +2476,240 @@ func (r *SingleInstanceDatabaseReconciler) 
deleteWallet(m *dbapi.SingleInstanceD return requeueN, nil } -//############################################################################# -// Execute Datapatch -//############################################################################# -func (r *SingleInstanceDatabaseReconciler) runDatapatch(m *dbapi.SingleInstanceDatabase, - readyPod corev1.Pod, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// ############################################################################# +// +// Updating clientWallet when TCPS is enabled +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) updateClientWallet(m *dbapi.SingleInstanceDatabase, + readyPod corev1.Pod, ctx context.Context, req ctrl.Request) error { + // Updation of tnsnames.ora in clientWallet for HOST and PORT fields + extSvc := &corev1.Service{} + extSvcName := m.Name + "-ext" + getExtSvcErr := r.Get(ctx, types.NamespacedName{Name: extSvcName, Namespace: m.Namespace}, extSvc) + + if getExtSvcErr == nil { + var host string + var port int32 + if m.Spec.LoadBalancer { + if len(extSvc.Status.LoadBalancer.Ingress) > 0 { + host = extSvc.Status.LoadBalancer.Ingress[0].Hostname + if host == "" { + host = extSvc.Status.LoadBalancer.Ingress[0].IP + } + port = extSvc.Spec.Ports[len(extSvc.Spec.Ports)-1].Port + } + } else { + host = dbcommons.GetNodeIp(r, ctx, req) + if host != "" { + port = extSvc.Spec.Ports[len(extSvc.Spec.Ports)-1].NodePort + } + } - // Datapatch not supported for XE Database - if m.Spec.Edition == "express" { - return requeueN, nil - } + r.Log.Info("Updating the client wallet...") + _, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.ClientWalletUpdate, host, port)) + if err != nil { + r.Log.Error(err, err.Error()) + return err + } - m.Status.Status = dbcommons.StatusPatching - r.Status().Update(ctx, m) - eventReason := 
"Datapatch Executing" - eventMsg := "datapatch begin execution" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + } else { + r.Log.Info("Unable to get the service while updating the clientWallet", "Service.Namespace", extSvc.Namespace, "Service.Name", extSvcName) + return getExtSvcErr + } + return nil +} - //RUN DATAPATCH +// ############################################################################# +// +// Configuring TCPS +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) configTcps(m *dbapi.SingleInstanceDatabase, + readyPod corev1.Pod, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + eventReason := "Configuring TCPS" + + if (m.Spec.EnableTCPS) && + ((!m.Status.IsTcpsEnabled) || // TCPS Enabled from a TCP state + (m.Spec.TcpsTlsSecret != "" && m.Status.TcpsTlsSecret == "") || // TCPS Secret is added in spec + (m.Spec.TcpsTlsSecret == "" && m.Status.TcpsTlsSecret != "") || // TCPS Secret is removed in spec + (m.Spec.TcpsTlsSecret != "" && m.Status.TcpsTlsSecret != "" && m.Spec.TcpsTlsSecret != m.Status.TcpsTlsSecret)) { //TCPS secret is changed + + // Set status to Updating, except when an error has been thrown from configTCPS script + if m.Status.Status != dbcommons.StatusError { + m.Status.Status = dbcommons.StatusUpdating + } + r.Status().Update(ctx, m) + + eventMsg := "Enabling TCPS in the database..." 
+ r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + + var TcpsCommand = dbcommons.EnableTcpsCMD + if m.Spec.TcpsTlsSecret != "" { // case when tls secret is either added or changed + TcpsCommand = "export TCPS_CERTS_LOCATION=" + dbcommons.TlsCertsLocation + " && " + dbcommons.EnableTcpsCMD + + // Checking for tls-secret mount in pods + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.PodMountsCmd, dbcommons.TlsCertsLocation)) + r.Log.Info("Mount Check Output") + r.Log.Info(out) + if err != nil { + r.Log.Error(err, err.Error()) + return requeueY, nil + } + + if (m.Status.TcpsTlsSecret != "") || // case when TCPS Secret is changed + (!strings.Contains(out, dbcommons.TlsCertsLocation)) { // if mount is not there in pod + // call deletePods() with zero pods in available and nil readyPod to delete all pods + result, err := r.deletePods(ctx, req, m, []corev1.Pod{}, corev1.Pod{}, 0, 0) + if result.Requeue { + return result, err + } + m.Status.TcpsTlsSecret = "" // to avoid reconciled pod deletions, in case of TCPS secret change and it fails + } + } + + // Enable TCPS + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", + ctx, req, false, "bash", "-c", TcpsCommand) + if err != nil { + r.Log.Error(err, err.Error()) + eventMsg = "Error encountered in enabling TCPS!"
+ r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + m.Status.Status = dbcommons.StatusError + r.Status().Update(ctx, m) + return requeueY, nil + } + r.Log.Info("enableTcps Output : \n" + out) + // Updating the Status and publishing the event + m.Status.CertCreationTimestamp = time.Now().Format(time.RFC3339) + m.Status.IsTcpsEnabled = true + m.Status.ClientWalletLoc = fmt.Sprintf(dbcommons.ClientWalletLocation, m.Spec.Sid) + // m.Spec.TcpsTlsSecret can be empty or non-empty + // Store secret name in case of tls-secret addition or change, otherwise would be "" + if m.Spec.TcpsTlsSecret != "" { + m.Status.TcpsTlsSecret = m.Spec.TcpsTlsSecret + } else { + m.Status.TcpsTlsSecret = "" + } + + r.Status().Update(ctx, m) + + eventMsg = "TCPS Enabled." + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + + requeueDuration, _ := time.ParseDuration(m.Spec.TcpsCertRenewInterval) + requeueDuration += func() time.Duration { requeueDuration, _ := time.ParseDuration("1s"); return requeueDuration }() + futureRequeue = ctrl.Result{Requeue: true, RequeueAfter: requeueDuration} + + // update clientWallet + err = r.updateClientWallet(m, readyPod, ctx, req) + if err != nil { + r.Log.Error(err, "Error in updating tnsnames.ora in clientWallet...") + return requeueY, nil + } + } else if !m.Spec.EnableTCPS && m.Status.IsTcpsEnabled { + // Disable TCPS + m.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, m) + + eventMsg := "Disabling TCPS in the database..." 
+ r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", + ctx, req, false, "bash", "-c", dbcommons.DisableTcpsCMD) + if err != nil { + r.Log.Error(err, err.Error()) + return requeueY, nil + } + r.Log.Info("disable TCPS Output : \n" + out) + // Updating the Status and publishing the event + m.Status.CertCreationTimestamp = "" + m.Status.IsTcpsEnabled = false + m.Status.ClientWalletLoc = "" + m.Status.TcpsTlsSecret = "" + + r.Status().Update(ctx, m) + + eventMsg = "TCPS Disabled." + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + + } else if m.Spec.EnableTCPS && m.Status.IsTcpsEnabled && m.Spec.TcpsCertRenewInterval != "" { + // Cert Renewal Logic + certCreationTimestamp, _ := time.Parse(time.RFC3339, m.Status.CertCreationTimestamp) + duration := time.Since(certCreationTimestamp) + allowdDuration, _ := time.ParseDuration(m.Spec.TcpsCertRenewInterval) + if duration > allowdDuration { + m.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, m) + + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.EnableTcpsCMD)) + if err != nil { + r.Log.Error(err, err.Error()) + return requeueY, nil + } + r.Log.Info("Cert Renewal Output : \n" + out) + // Updating the Status and publishing the event + m.Status.CertCreationTimestamp = time.Now().Format(time.RFC3339) + r.Status().Update(ctx, m) + + eventMsg := "TCPS Certificates Renewed at time %s," + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg, time.Now().Format(time.RFC3339)) + + requeueDuration, _ := time.ParseDuration(m.Spec.TcpsCertRenewInterval) + requeueDuration += func() time.Duration { requeueDuration, _ := time.ParseDuration("1s"); return requeueDuration }() + futureRequeue = ctrl.Result{Requeue: true, RequeueAfter: requeueDuration} + } + if m.Status.CertRenewInterval != 
m.Spec.TcpsCertRenewInterval { + requeueDuration, _ := time.ParseDuration(m.Spec.TcpsCertRenewInterval) + requeueDuration += func() time.Duration { requeueDuration, _ := time.ParseDuration("1s"); return requeueDuration }() + futureRequeue = ctrl.Result{Requeue: true, RequeueAfter: requeueDuration} + + m.Status.CertRenewInterval = m.Spec.TcpsCertRenewInterval + } + // update clientWallet + err := r.updateClientWallet(m, readyPod, ctx, req) + if err != nil { + r.Log.Error(err, "Error in updating tnsnames.ora clientWallet...") + return requeueY, nil + } + } else if m.Spec.EnableTCPS && m.Status.IsTcpsEnabled && m.Spec.TcpsCertRenewInterval == "" { + // update clientWallet + err := r.updateClientWallet(m, readyPod, ctx, req) + if err != nil { + r.Log.Error(err, "Error in updating tnsnames.ora clientWallet...") + return requeueY, nil + } + } + return requeueN, nil +} + +// ############################################################################# +// +// Execute Datapatch +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) runDatapatch(m *dbapi.SingleInstanceDatabase, + readyPod corev1.Pod, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + // Datapatch not supported for XE Database + if m.Spec.Edition == "express" || m.Spec.Edition == "free" { + eventReason := "Datapatch Check" + eventMsg := "datapatch not supported for " + m.Spec.Edition + " edition" + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + r.Log.Info(eventMsg) + return requeueN, nil + } + + m.Status.Status = dbcommons.StatusPatching + eventReason := "Datapatch Executing" + eventMsg := "datapatch begins execution" + r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + r.Status().Update(ctx, m) + + //RUN DATAPATCH out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.RunDatapatchCMD) if err != nil { @@ 
-1394,12 +2722,13 @@ func (r *SingleInstanceDatabaseReconciler) runDatapatch(m *dbapi.SingleInstanceD // Get Sqlpatch Description out, err = dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", fmt.Sprintf("echo -e \"%s\" | sqlplus -s / as sysdba ", dbcommons.GetSqlpatchDescriptionSQL)) + releaseUpdate := "" if err == nil { r.Log.Info("GetSqlpatchDescriptionSQL Output") r.Log.Info(out) SqlpatchDescriptions, _ := dbcommons.StringToLines(out) if len(SqlpatchDescriptions) > 0 { - m.Status.ReleaseUpdate = SqlpatchDescriptions[0] + releaseUpdate = SqlpatchDescriptions[0] } } @@ -1412,59 +2741,86 @@ func (r *SingleInstanceDatabaseReconciler) runDatapatch(m *dbapi.SingleInstanceD m.Status.DatafilesPatched = "true" status, versionFrom, versionTo, _ := dbcommons.GetSqlpatchStatus(r, r.Config, readyPod, ctx, req) - eventMsg = "data files patched from " + versionFrom + " to " + versionTo + " : " + status + if versionTo != "" { + eventMsg = "data files patched from release update " + versionFrom + " to " + versionTo + ", " + status + ": " + releaseUpdate + } else { + eventMsg = "datapatch execution completed" + } r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) return requeueN, nil } -//############################################################################# -// Update Init Parameters -//############################################################################# +// ############################################################################# +// +// Update Init Parameters +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) updateInitParameters(m *dbapi.SingleInstanceDatabase, readyPod corev1.Pod, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("updateInitParameters", req.NamespacedName) - if m.Status.InitParams == m.Spec.InitParams { + if m.Spec.InitParams == nil { return requeueN, nil 
} - - out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", - ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.AlterSgaPgaCpuCMD, m.Spec.InitParams.SgaTarget, - m.Spec.InitParams.PgaAggregateTarget, m.Spec.InitParams.CpuCount, dbcommons.GetSqlClient(m.Spec.Edition))) - if err != nil { - log.Error(err, err.Error()) - return requeueY, err + if m.Status.InitParams == *m.Spec.InitParams { + return requeueN, nil } - log.Info("AlterSgaPgaCpuCMD Output:" + out) - if m.Status.InitParams.Processes != m.Spec.InitParams.Processes { - // Altering 'Processes' needs database to be restarted + if (m.Spec.InitParams.PgaAggregateTarget != 0 && (m.Spec.InitParams.PgaAggregateTarget != m.Status.InitParams.PgaAggregateTarget)) || (m.Spec.InitParams.SgaTarget != 0 && (m.Spec.InitParams.SgaTarget != m.Status.InitParams.SgaTarget)) { + log.Info("Executing alter sga pga command", "pga_size", m.Spec.InitParams.PgaAggregateTarget, "sga_size", m.Spec.InitParams.SgaTarget) out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", - ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.AlterProcessesCMD, m.Spec.InitParams.Processes, dbcommons.GetSqlClient(m.Spec.Edition), - dbcommons.GetSqlClient(m.Spec.Edition))) + ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.AlterSgaPgaCMD, m.Spec.InitParams.SgaTarget, + m.Spec.InitParams.PgaAggregateTarget, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) return requeueY, err } - log.Info("AlterProcessesCMD Output:" + out) + // Notify the user about unsuccessful init-parameter value change + if strings.Contains(out, "ORA-") { + eventReason := "Invalid init-param value" + eventMsg := "Unable to change the init-param as specified. 
Error log: \n" + out + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + } + log.Info("AlterSgaPgaCpuCMD Output:" + out) } - out, err = dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", - ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.GetInitParamsSQL, dbcommons.GetSqlClient(m.Spec.Edition))) - if err != nil { - log.Error(err, err.Error()) - return requeueY, err + if (m.Spec.InitParams.CpuCount != 0) && (m.Status.InitParams.CpuCount != m.Spec.InitParams.CpuCount) { + log.Info("Executing alter cpu count command", "cpuCount", m.Spec.InitParams.CpuCount) + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, + "bash", "-c", fmt.Sprintf(dbcommons.AlterCpuCountCMD, m.Spec.InitParams.CpuCount, dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY, err + } + if strings.Contains(out, "ORA-") { + eventReason := "Invalid init-param value" + eventMsg := "Unable to change the init-param as specified. 
Error log: \n" + out + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + } + log.Info("AlterCpuCountCMD Output:" + out) } - log.Info("GetInitParamsSQL Output:" + out) - m.Status.InitParams = m.Spec.InitParams + if (m.Spec.InitParams.Processes != 0) && (m.Status.InitParams.Processes != m.Spec.InitParams.Processes) { + log.Info("Executing alter processes command", "processes", m.Spec.InitParams.Processes) + // Altering 'Processes' needs database to be restarted + out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf(dbcommons.AlterProcessesCMD, m.Spec.InitParams.Processes, dbcommons.SQLPlusCLI, + dbcommons.SQLPlusCLI)) + if err != nil { + log.Error(err, err.Error()) + return requeueY, err + } + log.Info("AlterProcessesCMD Output:" + out) + } return requeueN, nil } -//############################################################################# -// Update DB config params like FLASHBACK , FORCELOGGING , ARCHIVELOG -//############################################################################# +// ############################################################################# +// +// Update DB config params like FLASHBACK , FORCELOGGING , ARCHIVELOG +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanceDatabase, readyPod corev1.Pod, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -1483,21 +2839,15 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc flashBackStatus, archiveLogStatus, forceLoggingStatus, result := dbcommons.CheckDBConfig(readyPod, r, r.Config, ctx, req, m.Spec.Edition) if result.Requeue { + m.Status.Status = dbcommons.StatusNotReady return result, nil } - m.Status.ArchiveLog = strconv.FormatBool(archiveLogStatus) - m.Status.ForceLogging = strconv.FormatBool(forceLoggingStatus) - m.Status.FlashBack = 
strconv.FormatBool(flashBackStatus) - - log.Info("Flashback", "Status :", flashBackStatus) - log.Info("ArchiveLog", "Status :", archiveLogStatus) - log.Info("ForceLog", "Status :", forceLoggingStatus) //################################################################################################# // TURNING FLASHBACK , ARCHIVELOG , FORCELOGGING TO TRUE //################################################################################################# - if m.Spec.ArchiveLog && !archiveLogStatus { + if m.Spec.ArchiveLog != nil && *m.Spec.ArchiveLog && !archiveLogStatus { out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.CreateDBRecoveryDestCMD) @@ -1509,7 +2859,7 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc log.Info(out) out, err = dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.SetDBRecoveryDestSQL, dbcommons.GetSqlClient(m.Spec.Edition))) + fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.SetDBRecoveryDestSQL, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) return requeueY, err @@ -1518,7 +2868,7 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc log.Info(out) out, err = dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf(dbcommons.ArchiveLogTrueCMD, dbcommons.GetSqlClient(m.Spec.Edition))) + fmt.Sprintf(dbcommons.ArchiveLogTrueCMD, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) return requeueY, err @@ -1528,9 +2878,9 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc } - if m.Spec.ForceLogging && !forceLoggingStatus { + if m.Spec.ForceLogging != nil && *m.Spec.ForceLogging && !forceLoggingStatus { out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, 
"", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.ForceLoggingTrueSQL, dbcommons.GetSqlClient(m.Spec.Edition))) + fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.ForceLoggingTrueSQL, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) return requeueY, err @@ -1539,27 +2889,29 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc log.Info(out) } - if m.Spec.FlashBack && !flashBackStatus { + if m.Spec.FlashBack != nil && *m.Spec.FlashBack && !flashBackStatus { _, archiveLogStatus, _, result := dbcommons.CheckDBConfig(readyPod, r, r.Config, ctx, req, m.Spec.Edition) if result.Requeue { + m.Status.Status = dbcommons.StatusNotReady return result, nil } if archiveLogStatus { out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.FlashBackTrueSQL, dbcommons.GetSqlClient(m.Spec.Edition))) + fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.FlashBackTrueSQL, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) + m.Status.Status = dbcommons.StatusNotReady return requeueY, err } log.Info("FlashBackTrue Output") log.Info(out) } else { - // Occurs when flashback is attermpted to be turned on without turning on archiving first - eventReason := "Waiting" - eventMsg := "enable ArchiveLog to turn ON Flashback" - log.Info(eventMsg) + // Occurs when flashback is attempted to be turned on without turning on archiving first + eventReason := "Database Check" + eventMsg := "enable ArchiveLog to turn on Flashback" r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + log.Info(eventMsg) changeArchiveLog = true } @@ -1569,9 +2921,9 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc // TURNING FLASHBACK , ARCHIVELOG , FORCELOGGING TO FALSE //################################################################################################# - if 
!m.Spec.FlashBack && flashBackStatus { + if m.Spec.FlashBack != nil && !*m.Spec.FlashBack && flashBackStatus { out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.FlashBackFalseSQL, dbcommons.GetSqlClient(m.Spec.Edition))) + fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.FlashBackFalseSQL, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) return requeueY, err @@ -1579,35 +2931,37 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc log.Info("FlashBackFalse Output") log.Info(out) } - if !m.Spec.ArchiveLog && archiveLogStatus { + if m.Spec.ArchiveLog != nil && !*m.Spec.ArchiveLog && archiveLogStatus { flashBackStatus, _, _, result := dbcommons.CheckDBConfig(readyPod, r, r.Config, ctx, req, m.Spec.Edition) if result.Requeue { + m.Status.Status = dbcommons.StatusNotReady return result, nil } if !flashBackStatus { out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf(dbcommons.ArchiveLogFalseCMD, dbcommons.GetSqlClient(m.Spec.Edition))) + fmt.Sprintf(dbcommons.ArchiveLogFalseCMD, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) + m.Status.Status = dbcommons.StatusNotReady return requeueY, err } log.Info("ArchiveLogFalse Output") log.Info(out) } else { - // Occurs when archiving is attermpted to be turned off without turning off flashback first - eventReason := "Waiting" - eventMsg := "turn OFF Flashback to disable ArchiveLog" - log.Info(eventMsg) + // Occurs when archiving is attempted to be turned off without turning off flashback first + eventReason := "Database Check" + eventMsg := "turn off Flashback to disable ArchiveLog" r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + log.Info(eventMsg) changeArchiveLog = true } } - if !m.Spec.ForceLogging && forceLoggingStatus { + if m.Spec.ForceLogging != 
nil && !*m.Spec.ForceLogging && forceLoggingStatus { out, err := dbcommons.ExecCommand(r, r.Config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.ForceLoggingFalseSQL, dbcommons.GetSqlClient(m.Spec.Edition))) + fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.ForceLoggingFalseSQL, dbcommons.SQLPlusCLI)) if err != nil { log.Error(err, err.Error()) return requeueY, err @@ -1622,6 +2976,7 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc flashBackStatus, archiveLogStatus, forceLoggingStatus, result = dbcommons.CheckDBConfig(readyPod, r, r.Config, ctx, req, m.Spec.Edition) if result.Requeue { + m.Status.Status = dbcommons.StatusNotReady return result, nil } @@ -1636,29 +2991,141 @@ func (r *SingleInstanceDatabaseReconciler) updateDBConfig(m *dbapi.SingleInstanc // Needs to restart the Non Ready Pods ( Delete old ones and create new ones ) if m.Status.FlashBack == strconv.FormatBool(false) && flashBackStatus { - // call FindPods() to fetch pods all version/images of the same SIDB kind + // // call FindPods() to fetch pods all version/images of the same SIDB kind readyPod, replicasFound, available, _, err := dbcommons.FindPods(r, "", "", m.Name, m.Namespace, ctx, req) if err != nil { log.Error(err, err.Error()) return requeueY, err } - // delete non ready Pods as flashback needs restart of pods + // delete non ready Pods as flashback needs restart of pods to make sure failover works in sidbs with multiple replicas _, err = r.deletePods(ctx, req, m, available, readyPod, replicasFound, 1) - return requeueY, err + if err != nil { + log.Error(err, err.Error()) + return requeueY, err + } + return requeueN, err } m.Status.FlashBack = strconv.FormatBool(flashBackStatus) - if !changeArchiveLog && (flashBackStatus != m.Spec.FlashBack || - archiveLogStatus != m.Spec.ArchiveLog || forceLoggingStatus != m.Spec.ForceLogging) { + if !changeArchiveLog && ((m.Spec.FlashBack != nil && 
(flashBackStatus != *m.Spec.FlashBack)) || + (m.Spec.ArchiveLog != nil && (archiveLogStatus != *m.Spec.ArchiveLog)) || (m.Spec.ForceLogging != nil && (forceLoggingStatus != *m.Spec.ForceLogging))) { return requeueY, nil } return requeueN, nil } -//############################################################################# -// Manage Finalizer to cleanup before deletion of SingleInstanceDatabase -//############################################################################# +// ############################################################################# +// +// # Update Single instance database resource status +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) updateSidbStatus(sidb *dbapi.SingleInstanceDatabase, sidbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("updateSidbStatus", req.NamespacedName) + + flashBackStatus, archiveLogStatus, forceLoggingStatus, result := dbcommons.CheckDBConfig(sidbReadyPod, r, r.Config, ctx, req, sidb.Spec.Edition) + if result.Requeue { + sidb.Status.Status = dbcommons.StatusNotReady + return fmt.Errorf("could not check the database conifg of %s", sidb.Name) + } + + log.Info("flashBack", "Status :", flashBackStatus, "Reconcile Step : ", "updateSidbStatus") + log.Info("ArchiveLog", "Status :", archiveLogStatus, "Reconcile Step : ", "updateSidbStatus") + log.Info("forceLogging", "Status :", forceLoggingStatus, "Reconcile Step : ", "updateSidbStatus") + + sidb.Status.ArchiveLog = strconv.FormatBool(archiveLogStatus) + sidb.Status.ForceLogging = strconv.FormatBool(forceLoggingStatus) + sidb.Status.FlashBack = strconv.FormatBool(flashBackStatus) + + cpu_count, pga_aggregate_target, processes, sga_target, err := dbcommons.CheckDBInitParams(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return err + } + sidbInitParams := dbapi.SingleInstanceDatabaseInitParams{ + SgaTarget: sga_target, + 
PgaAggregateTarget: pga_aggregate_target, + Processes: processes, + CpuCount: cpu_count, + } + // log.Info("GetInitParamsSQL Output:" + out) + + sidb.Status.InitParams = sidbInitParams + // sidb.Status.InitParams = sidb.Spec.InitParams + + // Get database role and update the status + sidbRole, err := dbcommons.GetDatabaseRole(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return err + } + log.Info("Database "+sidb.Name, "Database Role : ", sidbRole) + sidb.Status.Role = sidbRole + + // Get database version and update the status + version, err := dbcommons.GetDatabaseVersion(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return err + } + log.Info("Database "+sidb.Name, "Database Version : ", version) + sidb.Status.ReleaseUpdate = version + + dbMajorVersion, err := strconv.Atoi(strings.Split(sidb.Status.ReleaseUpdate, ".")[0]) + if err != nil { + r.Log.Error(err, err.Error()) + return err + } + log.Info("Database "+sidb.Name, "Database Major Version : ", dbMajorVersion) + + // Checking if OEM is supported in the provided Database version + if dbMajorVersion >= 23 { + sidb.Status.OemExpressUrl = dbcommons.ValueUnavailable + } else { + sidb.Status.OemExpressUrl = oemExpressUrl + } + + if sidb.Status.Role == "PRIMARY" && sidb.Status.DatafilesPatched != "true" { + eventReason := "Datapatch Pending" + eventMsg := "datapatch execution pending" + r.Recorder.Eventf(sidb, corev1.EventTypeNormal, eventReason, eventMsg) + } + + // update status to Ready after all operations succeed + sidb.Status.Status = dbcommons.StatusReady + + r.Status().Update(ctx, sidb) + + return nil +} + +// ############################################################################# +// +// Update ORDS Status +// +// ############################################################################# +func (r *SingleInstanceDatabaseReconciler) updateORDSStatus(m *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) { + + if m.Status.OrdsReference == "" { + return + } + n := 
&dbapi.OracleRestDataService{} + err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Status.OrdsReference}, n) + if err != nil { + return + } + + if n.Status.OrdsInstalled { + // Update Status to Healthy/Unhealthy when SIDB turns Healthy/Unhealthy after ORDS is Installed + n.Status.Status = m.Status.Status + r.Status().Update(ctx, n) + return + } +} + +// ############################################################################# +// +// Manage Finalizer to cleanup before deletion of SingleInstanceDatabase +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) manageSingleInstanceDatabaseDeletion(req ctrl.Request, ctx context.Context, m *dbapi.SingleInstanceDatabase) (ctrl.Result, error) { log := r.Log.WithValues("manageSingleInstanceDatabaseDeletion", req.NamespacedName) @@ -1701,9 +3168,11 @@ func (r *SingleInstanceDatabaseReconciler) manageSingleInstanceDatabaseDeletion( return requeueN, nil } -//############################################################################# -// Finalization logic for singleInstanceDatabaseFinalizer -//############################################################################# +// ############################################################################# +// +// Finalization logic for singleInstanceDatabaseFinalizer +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) cleanupSingleInstanceDatabase(req ctrl.Request, ctx context.Context, m *dbapi.SingleInstanceDatabase) (ctrl.Result, error) { log := r.Log.WithValues("cleanupSingleInstanceDatabase", req.NamespacedName) @@ -1717,6 +3186,13 @@ func (r *SingleInstanceDatabaseReconciler) cleanupSingleInstanceDatabase(req ctr return requeueY, nil } + if m.Status.DgBroker != nil { + eventReason := "Cannot Delete" + eventMsg := "database cannot be deleted as it is present in a DataGuard Broker configuration" + 
r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + return requeueY, errors.New(eventMsg) + } + // call deletePods() with zero pods in avaiable and nil readyPod to delete all pods result, err := r.deletePods(ctx, req, m, []corev1.Pod{}, corev1.Pod{}, 0, 0) if result.Requeue { @@ -1738,20 +3214,209 @@ func (r *SingleInstanceDatabaseReconciler) cleanupSingleInstanceDatabase(req ctr for _, pod := range podList.Items { podNames += pod.Name + " " } - eventReason := "Waiting" - eventMsg := "waiting for " + req.Name + " database pods ( " + podNames + " ) to terminate" - r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - r.Log.Info(eventMsg) - time.Sleep(15 * time.Second) } log.Info("Successfully cleaned up SingleInstanceDatabase") return requeueN, nil } -//############################################################################# -// SetupWithManager sets up the controller with the Manager -//############################################################################# +// ############################################################################################# +// +// Manage conversion of singleinstancedatabase from PHYSICAL_STANDBY To SNAPSHOT_STANDBY +// +// ############################################################################################# +func (r *SingleInstanceDatabaseReconciler) manageConvPhysicalToSnapshot(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("manageConvPhysicalToSnapshot", req.NamespacedName) + var singleInstanceDatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, &singleInstanceDatabase); err != nil { + if apierrors.IsNotFound(err) { + log.Info("requested resource not found") + return requeueY, nil + } + log.Error(err, err.Error()) + return requeueY, err + } + + sidbReadyPod, err := GetDatabaseReadyPod(r, &singleInstanceDatabase, ctx, req) + if err != nil { + return requeueY, err + } + 
if sidbReadyPod.Name == "" { + log.Info("No ready Pod for the requested singleinstancedatabase") + return requeueY, nil + } + + if singleInstanceDatabase.Spec.ConvertToSnapshotStandby { + // Convert a PHYSICAL_STANDBY -> SNAPSHOT_STANDBY + singleInstanceDatabase.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, &singleInstanceDatabase) + if err := convertPhysicalStdToSnapshotStdDB(r, &singleInstanceDatabase, &sidbReadyPod, ctx, req); err != nil { + switch err { + case ErrNotPhysicalStandby: + r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Conversion to Snapshot Standby Not allowed", "Database not in physical standby role") + log.Info("Conversion to Snapshot Standby not allowed as database not in physical standby role") + return requeueY, nil + case ErrDBNotConfiguredWithDG: + // cannot convert to snapshot database + r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Conversion to Snapshot Standby Not allowed", "Database is not configured with dataguard") + log.Info("Conversion to Snapshot Standby not allowed as requested database is not configured with dataguard") + return requeueY, nil + case ErrFSFOEnabledForDGConfig: + r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Conversion to Snapshot Standby Not allowed", "Database is a FastStartFailover target") + log.Info("Conversion to Snapshot Standby Not allowed as database is a FastStartFailover target") + return requeueY, nil + case ErrAdminPasswordSecretNotFound: + r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Admin Password", "Database admin password secret not found") + log.Info("Database admin password secret not found") + return requeueY, nil + default: + log.Error(err, err.Error()) + return requeueY, nil + } + } + log.Info(fmt.Sprintf("Database %s converted to snapshot standby", singleInstanceDatabase.Name)) + singleInstanceDatabase.Status.ConvertToSnapshotStandby = true + singleInstanceDatabase.Status.Status = 
dbcommons.StatusReady + // Get database role and update the status + sidbRole, err := dbcommons.GetDatabaseRole(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return requeueN, err + } + log.Info("Database "+singleInstanceDatabase.Name, "Database Role : ", sidbRole) + singleInstanceDatabase.Status.Role = sidbRole + r.Status().Update(ctx, &singleInstanceDatabase) + } else { + // Convert a SNAPSHOT_STANDBY -> PHYSICAL_STANDBY + singleInstanceDatabase.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, &singleInstanceDatabase) + if err := convertSnapshotStdToPhysicalStdDB(r, &singleInstanceDatabase, &sidbReadyPod, ctx, req); err != nil { + switch err { + default: + r.Log.Error(err, err.Error()) + return requeueY, nil + } + } + singleInstanceDatabase.Status.ConvertToSnapshotStandby = false + singleInstanceDatabase.Status.Status = dbcommons.StatusReady + // Get database role and update the status + sidbRole, err := dbcommons.GetDatabaseRole(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return requeueN, err + } + log.Info("Database "+singleInstanceDatabase.Name, "Database Role : ", sidbRole) + singleInstanceDatabase.Status.Role = sidbRole + r.Status().Update(ctx, &singleInstanceDatabase) + } + + return requeueN, nil +} + +func convertPhysicalStdToSnapshotStdDB(r *SingleInstanceDatabaseReconciler, singleInstanceDatabase *dbapi.SingleInstanceDatabase, sidbReadyPod *corev1.Pod, ctx context.Context, req ctrl.Request) error { + log := r.Log.WithValues("convertPhysicalStdToSnapshotStdDB", req.NamespacedName) + log.Info(fmt.Sprintf("Checking the role %s database i.e %s", singleInstanceDatabase.Name, singleInstanceDatabase.Status.Role)) + if singleInstanceDatabase.Status.Role != "PHYSICAL_STANDBY" { + return ErrNotPhysicalStandby + } + + var dataguardBroker dbapi.DataguardBroker + log.Info(fmt.Sprintf("Checking if the database %s is configured with dgbroker or not ?", singleInstanceDatabase.Name)) + if singleInstanceDatabase.Status.DgBroker != 
nil { + if err := r.Get(ctx, types.NamespacedName{Namespace: singleInstanceDatabase.Namespace, Name: *singleInstanceDatabase.Status.DgBroker}, &dataguardBroker); err != nil { + if apierrors.IsNotFound(err) { + log.Info("Resource not found") + return errors.New("Dataguardbroker resource not found") + } + return err + } + log.Info(fmt.Sprintf("database %s is configured with dgbroker %s", singleInstanceDatabase.Name, *singleInstanceDatabase.Status.DgBroker)) + if fastStartFailoverStatus, _ := strconv.ParseBool(dataguardBroker.Status.FastStartFailover); fastStartFailoverStatus { + // not allowed to convert to snapshot standby + return ErrFSFOEnabledForDGConfig + } + } else { + // cannot convert to snapshot database + return ErrDBNotConfiguredWithDG + } + + // get singleinstancedatabase ready pod + // execute the dgmgrl command for conversion to snapshot database + // Exception handling + // Get Admin password for current primary database + var adminPasswordSecret corev1.Secret + if err := r.Get(context.TODO(), types.NamespacedName{Name: singleInstanceDatabase.Spec.AdminPassword.SecretName, Namespace: singleInstanceDatabase.Namespace}, &adminPasswordSecret); err != nil { + return err + } + var adminPassword string = string(adminPasswordSecret.Data[singleInstanceDatabase.Spec.AdminPassword.SecretKey]) + + // Connect to 'primarySid' db using dgmgrl and switchover to 'targetSidbSid' db to make 'targetSidbSid' db primary + if _, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)); err != nil { + return err + } + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("dgmgrl sys@%s \"convert database %s to snapshot standby;\" < admin.pwd", dataguardBroker.Status.PrimaryDatabase, singleInstanceDatabase.Status.Sid)) + if err != nil { + return err + } + 
log.Info(fmt.Sprintf("Convert to snapshot standby command output \n %s", out)) + + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("echo -e \"alter pluggable database %s open;\" | %s", singleInstanceDatabase.Status.Pdbname, dbcommons.SQLPlusCLI)) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Open pluggable databases output \n %s", out)) + + return nil +} + +func convertSnapshotStdToPhysicalStdDB(r *SingleInstanceDatabaseReconciler, singleInstanceDatabase *dbapi.SingleInstanceDatabase, sidbReadyPod *corev1.Pod, ctx context.Context, req ctrl.Request) error { + log := r.Log.WithValues("convertSnapshotStdToPhysicalStdDB", req.NamespacedName) + + var dataguardBroker dbapi.DataguardBroker + if err := r.Get(ctx, types.NamespacedName{Namespace: singleInstanceDatabase.Namespace, Name: *singleInstanceDatabase.Status.DgBroker}, &dataguardBroker); err != nil { + if apierrors.IsNotFound(err) { + return errors.New("dataguardbroker resource not found") + } + return err + } + + var adminPasswordSecret corev1.Secret + if err := r.Get(context.TODO(), types.NamespacedName{Name: singleInstanceDatabase.Spec.AdminPassword.SecretName, Namespace: singleInstanceDatabase.Namespace}, &adminPasswordSecret); err != nil { + if apierrors.IsNotFound(err) { + return ErrAdminPasswordSecretNotFound + } + return err + } + var adminPassword string = string(adminPasswordSecret.Data[singleInstanceDatabase.Spec.AdminPassword.SecretKey]) + + // Connect to 'primarySid' db using dgmgrl and switchover to 'targetSidbSid' db to make 'targetSidbSid' db primary + _, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) + if err != nil { + return err + } + log.Info("Converting snapshot standby to physical standby") + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, 
sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("dgmgrl sys@%s \"convert database %s to physical standby;\" < admin.pwd", dataguardBroker.Status.PrimaryDatabase, singleInstanceDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info(fmt.Sprintf("Database %s converted to physical standby \n %s", singleInstanceDatabase.Name, out)) + log.Info("opening the PDB for the database") + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("echo -e \"alter pluggable database %s open;\" | %s", singleInstanceDatabase.Status.Pdbname, dbcommons.SQLPlusCLI)) + if err != nil { + r.Log.Error(err, err.Error()) + return err + } + log.Info(fmt.Sprintf("PDB open command output %s", out)) + + return nil +} + +// ############################################################################# +// +// SetupWithManager sets up the controller with the Manager +// +// ############################################################################# func (r *SingleInstanceDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&dbapi.SingleInstanceDatabase{}). @@ -1760,3 +3425,517 @@ func (r *SingleInstanceDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) er WithOptions(controller.Options{MaxConcurrentReconciles: 100}). //ReconcileHandler is never invoked concurrently with the same object. 
Complete(r) } + +// ############################################################################# +// +// Check primary database status +// +// ############################################################################# +func CheckPrimaryDatabaseStatus(p *dbapi.SingleInstanceDatabase) error { + + if p.Status.Status != dbcommons.StatusReady { + return fmt.Errorf("referred primary database %v is NOT READY", p.Name) + } + return nil +} + +// ############################################################################# +// +// Check if refered database is the primary database +// +// ############################################################################# +func CheckDatabaseRoleAsPrimary(p *dbapi.SingleInstanceDatabase) error { + + if strings.ToUpper(p.Status.Role) != "PRIMARY" { + return fmt.Errorf("referred database %v is not in PRIMARY role", p.Name) + } + return nil +} + +// ############################################################################# +// +// Get ready pod for the singleinstancedatabase resource +// +// ############################################################################# +func GetDatabaseReadyPod(r client.Reader, d *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (corev1.Pod, error) { + + dbReadyPod, _, _, _, err := dbcommons.FindPods(r, d.Spec.Image.Version, + d.Spec.Image.PullFrom, d.Name, d.Namespace, ctx, req) + + return dbReadyPod, err +} + +// ############################################################################# +// +// Get admin password for singleinstancedatabase +// +// ############################################################################# +func GetDatabaseAdminPassword(r client.Reader, d *dbapi.SingleInstanceDatabase, ctx context.Context) (string, error) { + + adminPasswordSecret := &corev1.Secret{} + adminPassword := "" + err := r.Get(ctx, types.NamespacedName{Name: d.Spec.AdminPassword.SecretName, Namespace: d.Namespace}, adminPasswordSecret) + if err != nil { + return adminPassword, 
err + } + adminPassword = string(adminPasswordSecret.Data[d.Spec.AdminPassword.SecretKey]) + return adminPassword, nil +} + +// ############################################################################# +// +// Validate primary singleinstancedatabase admin password +// +// ############################################################################# +func ValidatePrimaryDatabaseAdminPassword(r *SingleInstanceDatabaseReconciler, p *dbapi.SingleInstanceDatabase, + adminPassword string, ctx context.Context, req ctrl.Request) error { + + dbReadyPod, err := GetDatabaseReadyPod(r, p, ctx, req) + if err != nil { + return err + } + + out, err := dbcommons.ExecCommand(r, r.Config, dbReadyPod.Name, dbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", fmt.Sprintf(dbcommons.ValidateAdminPassword, adminPassword), dbcommons.GetSqlClient(p.Spec.Edition))) + if err != nil { + return err + } + + if strings.Contains(out, "USER is \"SYS\"") { + r.Log.Info("validated Admin password successfully") + } else { + if strings.Contains(out, "ORA-01017") { + r.Log.Info("Invalid primary database password, Logon denied") + } + return fmt.Errorf("primary database admin password validation failed") + } + + return nil +} + +// ############################################################################# +// +// Validate refered primary database db params are all enabled +// +// ############################################################################# +func ValidateDatabaseConfiguration(p *dbapi.SingleInstanceDatabase) error { + var missingModes []string + if p.Status.ArchiveLog == "false" { + missingModes = append(missingModes, "ArchiveLog") + } + if p.Status.FlashBack == "false" { + missingModes = append(missingModes, "FlashBack") + } + if p.Status.ForceLogging == "false" { + missingModes = append(missingModes, "ForceLogging") + } + if p.Status.ArchiveLog == "false" || p.Status.FlashBack == "false" || p.Status.ForceLogging == "false" { + return 
fmt.Errorf("%v modes are not enabled in the primary database %v", strings.Join(missingModes, ","), p.Name) + } + return nil +} + +// ############################################################################# +// +// Validate refered primary database for standby sidb creation +// +// ############################################################################# +func ValidatePrimaryDatabaseForStandbyCreation(r *SingleInstanceDatabaseReconciler, stdby *dbapi.SingleInstanceDatabase, + primary *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("ValidatePrimaryDatabase", req.NamespacedName) + + if stdby.Status.DatafilesCreated == "true" { + return nil + } + + log.Info(fmt.Sprintf("Checking primary database %s status...", primary.Name)) + err := CheckPrimaryDatabaseStatus(primary) + if err != nil { + stdby.Status.Status = dbcommons.StatusPending + return err + } + + log.Info("Checking for referred database role...") + err = CheckDatabaseRoleAsPrimary(primary) + if err != nil { + stdby.Status.Status = dbcommons.StatusError + return err + } + + r.Recorder.Eventf(stdby, corev1.EventTypeNormal, "Validation", "Primary database is ready") + + adminPassword, err := GetDatabaseAdminPassword(r, stdby, ctx) + if err != nil { + stdby.Status.Status = dbcommons.StatusError + return err + } + + log.Info(fmt.Sprintf("Validating admin password for the primary Database %s...", primary.Name)) + err = ValidatePrimaryDatabaseAdminPassword(r, primary, adminPassword, ctx, req) + if err != nil { + stdby.Status.Status = dbcommons.StatusError + return err + } + + log.Info(fmt.Sprintf("Validating primary database %s configuration...", primary.Name)) + err = ValidateDatabaseConfiguration(primary) + if err != nil { + r.Recorder.Eventf(stdby, corev1.EventTypeWarning, "Spec Error", err.Error()) + stdby.Status.Status = dbcommons.StatusError + return err + } + + r.Recorder.Eventf(stdby, corev1.EventTypeNormal, "Validation", "Successfully 
validated the primary database admin password and configuration") + + return nil +} + +// ############################################################################# +// +// Get total database pods for singleinstancedatabase +// +// ############################################################################# +func GetTotalDatabasePods(r client.Reader, d *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (int, error) { + _, totalPods, _, _, err := dbcommons.FindPods(r, d.Spec.Image.Version, + d.Spec.Image.PullFrom, d.Name, d.Namespace, ctx, req) + + return totalPods, err +} + +// ############################################################################# +// +// Set tns names for primary database for dataguard configuraion +// +// ############################################################################# +func SetupTnsNamesPrimaryForDG(r *SingleInstanceDatabaseReconciler, p *dbapi.SingleInstanceDatabase, s *dbapi.SingleInstanceDatabase, + primaryReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) error { + + out, err := dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf("cat /opt/oracle/oradata/dbconfig/%s/tnsnames.ora", strings.ToUpper(p.Spec.Sid))) + if err != nil { + return fmt.Errorf("error obtaining the contents of tnsnames.ora in the primary database %v", p.Name) + } + r.Log.Info("tnsnames.ora content is as follows:") + r.Log.Info(out) + + if strings.Contains(out, "(SERVICE_NAME = "+strings.ToUpper(s.Spec.Sid)+")") { + r.Log.Info("TNS ENTRY OF " + s.Spec.Sid + " ALREADY EXISTS ON PRIMARY Database ") + } else { + tnsnamesEntry := dbcommons.StandbyTnsnamesEntry + tnsnamesEntry = strings.ReplaceAll(tnsnamesEntry, "##STANDBYDATABASE_SID##", s.Spec.Sid) + tnsnamesEntry = strings.ReplaceAll(tnsnamesEntry, "##STANDBYDATABASE_SERVICE_EXPOSED##", s.Name) + + out, err = dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, 
"", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | cat >> /opt/oracle/oradata/dbconfig/%s/tnsnames.ora ", tnsnamesEntry, strings.ToUpper(p.Spec.Sid))) + if err != nil { + return fmt.Errorf("unable to set tnsnames.ora in the primary database %v", p.Name) + } + r.Log.Info("Modifying tnsnames.ora Output") + r.Log.Info(out) + + } + return nil +} + +// ############################################################################# +// +// Restarting listners in database +// +// ############################################################################# +func RestartListenerInDatabase(r *SingleInstanceDatabaseReconciler, primaryReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) error { + r.Log.Info("Restarting listener in the database through pod", "primary database pod name", primaryReadyPod.Name) + out, err := dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", "lsnrctl stop && lsnrctl start") + if err != nil { + return fmt.Errorf("unable to restart listener in the database through pod %v", primaryReadyPod.Name) + } + r.Log.Info("Listener restart output") + r.Log.Info(out) + return nil +} + +// ############################################################################# +// +// Setup primary listener for dataguard configuration +// +// ############################################################################# +func SetupListenerPrimaryForDG(r *SingleInstanceDatabaseReconciler, p *dbapi.SingleInstanceDatabase, s *dbapi.SingleInstanceDatabase, + primaryReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) error { + + out, err := dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf("cat /opt/oracle/oradata/dbconfig/%s/listener.ora ", strings.ToUpper(p.Spec.Sid))) + if err != nil { + return fmt.Errorf("unable to obtain contents of listener.ora in primary database %v", p.Name) + } + 
r.Log.Info("listener.ora Output") + r.Log.Info(out) + + if strings.Contains(out, strings.ToUpper(p.Spec.Sid)+"_DGMGRL") { + r.Log.Info("LISTENER.ORA ALREADY HAS " + p.Spec.Sid + "_DGMGRL ENTRY IN SID_LIST_LISTENER ") + } else { + out, err = dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | cat > /opt/oracle/oradata/dbconfig/%s/listener.ora ", dbcommons.ListenerEntry, strings.ToUpper(p.Spec.Sid))) + if err != nil { + return fmt.Errorf("unable to modify listener.ora in the primary database %v", p.Name) + } + r.Log.Info("Modifying listener.ora Output") + r.Log.Info(out) + + err = RestartListenerInDatabase(r, primaryReadyPod, ctx, req) + if err != nil { + return err + } + + } + return nil +} + +// ############################################################################# +// +// Setup init parameters of primary database for dataguard configuration +// +// ############################################################################# +func SetupInitParamsPrimaryForDG(r *SingleInstanceDatabaseReconciler, primaryReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) error { + r.Log.Info("Running StandbyDatabasePrerequisitesSQL in the primary database") + out, err := dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.StandbyDatabasePrerequisitesSQL, dbcommons.SQLPlusCLI)) + if err != nil { + return fmt.Errorf("unable to run StandbyDatabasePrerequisitesSQL in primary database") + } + r.Log.Info("StandbyDatabasePrerequisites Output") + r.Log.Info(out) + return nil +} + +// ############################################################################# +// +// Setup primary database for standby singleinstancedatabase +// +// ############################################################################# +func SetupPrimaryDatabase(r 
*SingleInstanceDatabaseReconciler, stdby *dbapi.SingleInstanceDatabase, + primary *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("SetupPrimaryDatabase", req.NamespacedName) + + totalStandbyPods, err := GetTotalDatabasePods(r, stdby, ctx, req) + if err != nil { + return err + } + // NO need to setup primary database if standby database pods are initialized + if totalStandbyPods > 0 { + return nil + } + + primaryDbReadyPod, err := GetDatabaseReadyPod(r, primary, ctx, req) + if err != nil { + return err + } + + log.Info("Setting up tnsnames.ora in primary database", "primaryDatabase", primary.Name) + err = SetupTnsNamesPrimaryForDG(r, primary, stdby, primaryDbReadyPod, ctx, req) + if err != nil { + return err + } + + log.Info("Setting up listener.ora in primary database", "primaryDatabase", primary.Name) + err = SetupListenerPrimaryForDG(r, primary, stdby, primaryDbReadyPod, ctx, req) + if err != nil { + return err + } + + log.Info("Setting up some InitParams for DG in primary database", "primaryDatabase", primary.Name) + err = SetupInitParamsPrimaryForDG(r, primaryDbReadyPod, ctx, req) + if err != nil { + return err + } + + return nil + +} + +// ############################################################################# +// +// Get all pdbs in a singleinstancedatabase +// +// ############################################################################# +func GetAllPdbInDatabase(r *SingleInstanceDatabaseReconciler, dbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) ([]string, error) { + var pdbs []string + out, err := dbcommons.ExecCommand(r, r.Config, dbReadyPod.Name, dbReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf("echo -e \"%s\" | sqlplus -s / as sysdba", dbcommons.GetPdbsSQL)) + if err != nil { + r.Log.Error(err, err.Error()) + return pdbs, err + } + r.Log.Info("GetPdbsSQL Output") + r.Log.Info(out) + + pdbs, _ = dbcommons.StringToLines(out) + return pdbs, nil +} + +// 
############################################################################# +// +// Setup tnsnames.ora for all the pdb list in the singleinstancedatabase +// +// ############################################################################# +func SetupTnsNamesForPDBListInDatabase(r *SingleInstanceDatabaseReconciler, d *dbapi.SingleInstanceDatabase, + dbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request, pdbList []string) error { + for _, pdb := range pdbList { + if pdb == "" { + continue + } + + // Get the Tnsnames.ora entries + out, err := dbcommons.ExecCommand(r, r.Config, dbReadyPod.Name, dbReadyPod.Namespace, "", + ctx, req, false, "bash", "-c", fmt.Sprintf("cat /opt/oracle/oradata/dbconfig/%s/tnsnames.ora", strings.ToUpper(d.Spec.Sid))) + if err != nil { + return err + } + r.Log.Info("tnsnames.ora Output") + r.Log.Info(out) + + if strings.Contains(out, "(SERVICE_NAME = "+strings.ToUpper(pdb)+")") { + r.Log.Info("TNS ENTRY OF " + strings.ToUpper(pdb) + " ALREADY EXISTS ON SIDB ") + } else { + tnsnamesEntry := dbcommons.PDBTnsnamesEntry + tnsnamesEntry = strings.ReplaceAll(tnsnamesEntry, "##PDB_NAME##", strings.ToUpper(pdb)) + + // Add Tnsnames.ora For pdb on Standby Database + out, err = dbcommons.ExecCommand(r, r.Config, dbReadyPod.Name, dbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | cat >> /opt/oracle/oradata/dbconfig/%s/tnsnames.ora ", tnsnamesEntry, strings.ToUpper(d.Spec.Sid))) + if err != nil { + return err + } + r.Log.Info("Modifying tnsnames.ora for Pdb Output") + r.Log.Info(out) + + } + } + + return nil +} + +// ############################################################################# +// +// Setup tnsnames.ora in standby database for primary singleinstancedatabase +// +// ############################################################################# +func SetupPrimaryDBTnsNamesInStandby(r *SingleInstanceDatabaseReconciler, s *dbapi.SingleInstanceDatabase, + dbReadyPod corev1.Pod, ctx 
context.Context, req ctrl.Request) error { + + out, err := dbcommons.ExecCommand(r, r.Config, dbReadyPod.Name, dbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | cat >> /opt/oracle/oradata/dbconfig/%s/tnsnames.ora ", dbcommons.PrimaryTnsnamesEntry, strings.ToUpper(s.Spec.Sid))) + if err != nil { + return err + } + r.Log.Info("Modifying tnsnames.ora Output") + r.Log.Info(out) + + return nil +} + +// ############################################################################# +// +// Enabling flashback in singleinstancedatabase +// +// ############################################################################# +func EnableFlashbackInDatabase(r *SingleInstanceDatabaseReconciler, dbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) error { + out, err := dbcommons.ExecCommand(r, r.Config, dbReadyPod.Name, dbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", dbcommons.FlashBackTrueSQL, dbcommons.GetSqlClient("enterprise"))) + if err != nil { + return err + } + r.Log.Info("FlashBackTrue Output") + r.Log.Info(out) + return nil +} + +// ############################################################################# +// +// setup standby database +// +// ############################################################################# +func SetupStandbyDatabase(r *SingleInstanceDatabaseReconciler, stdby *dbapi.SingleInstanceDatabase, + primary *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) error { + + primaryReadyPod, err := GetDatabaseReadyPod(r, primary, ctx, req) + if err != nil { + return err + } + r.Log.Info("Primary DB Name: " + primaryReadyPod.Name) + + stdbyReadyPod, err := GetDatabaseReadyPod(r, stdby, ctx, req) + if err != nil { + return err + } + + r.Log.Info("Getting the list of all pdbs in primary database") + pdbListPrimary, err := GetAllPdbInDatabase(r, primaryReadyPod, ctx, req) + if err != nil { + return err + } + + r.Log.Info("Setting up tnsnames in 
standby database for the pdbs of primary database") + err = SetupTnsNamesForPDBListInDatabase(r, stdby, stdbyReadyPod, ctx, req, pdbListPrimary) + if err != nil { + return err + } + + r.Log.Info("Setting up tnsnames entry for primary database in standby database") + err = SetupPrimaryDBTnsNamesInStandby(r, stdby, stdbyReadyPod, ctx, req) + if err != nil { + return err + } + + r.Log.Info("Setting up listener in the standby database") + err = SetupListenerPrimaryForDG(r, stdby, primary, stdbyReadyPod, ctx, req) + if err != nil { + return err + } + + flashBackStatus, _, _, result := dbcommons.CheckDBConfig(stdbyReadyPod, r, r.Config, ctx, req, stdby.Spec.Edition) + if result.Requeue { + return fmt.Errorf("error in obtaining the Database Config status") + } + if !flashBackStatus { + r.Log.Info("Setting up flashback mode in the standby database") + err = EnableFlashbackInDatabase(r, stdbyReadyPod, ctx, req) + if err != nil { + return err + } + } + + return nil +} + +// ############################################################################# +// +// Create oracle hostname environment variable object to be passed to sidb +// +// ############################################################################# +func CreateOracleHostnameEnvVarObj(sidb *dbapi.SingleInstanceDatabase, referedPrimaryDatabase *dbapi.SingleInstanceDatabase) corev1.EnvVar { + dbMajorVersion, err := strconv.Atoi(strings.Split(referedPrimaryDatabase.Status.ReleaseUpdate, ".")[0]) + if err != nil { + // r.Log.Error(err, err.Error()) + return corev1.EnvVar{ + Name: "ORACLE_HOSTNAME", + Value: "", + } + } + if dbMajorVersion >= 23 { + return corev1.EnvVar{ + Name: "ORACLE_HOSTNAME", + Value: sidb.Name, + } + } else { + return corev1.EnvVar{ + Name: "ORACLE_HOSTNAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + } + } +} diff --git a/controllers/database/suite_test.go b/controllers/database/suite_test.go index 9b62324a..6c6772b4 
100644 --- a/controllers/database/suite_test.go +++ b/controllers/database/suite_test.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -42,13 +42,12 @@ import ( "path/filepath" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -66,12 +65,10 @@ var testEnv *envtest.Environment func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Controller Suite") } -var _ = BeforeSuite(func(done Done) { +var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) By("bootstrapping test environment") @@ -87,14 +84,15 @@ var _ = BeforeSuite(func(done Done) { err = databasev1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = databasev1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { By("tearing down the test environment") diff --git a/controllers/dataguard/datagauard_errors.go b/controllers/dataguard/datagauard_errors.go new file mode 100644 index 00000000..94b2b0ea --- /dev/null +++ b/controllers/dataguard/datagauard_errors.go @@ -0,0 +1,47 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "errors" +) + +var ErrSidbWithMutipleReplicas error = errors.New("SingleInstanceDatabase with multiple replicas is not supported") +var ErrCurrentPrimaryDatabaseNotReady error = errors.New("current primary database not ready") +var ErrCurrentPrimaryDatabaseNotFound error = errors.New("current primary database not found") diff --git a/controllers/dataguard/dataguard_utils.go b/controllers/dataguard/dataguard_utils.go new file mode 100644 index 00000000..4c16f82b --- /dev/null +++ b/controllers/dataguard/dataguard_utils.go @@ -0,0 +1,1061 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" +) + +// ############################################################################################################### +// +// Clean up necessary resources required prior to dataguardbroker resource deletion +// +// ############################################################################################################### +func cleanupDataguardBroker(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, req ctrl.Request, ctx context.Context) error { + log := ctrllog.FromContext(ctx).WithValues("cleanupDataguardBroker", req.NamespacedName) + + log.Info(fmt.Sprintf("Cleaning for dataguard broker %v deletion", broker.Name)) + + // Fetch Primary Database Reference + var sidb dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: 
broker.GetCurrentPrimaryDatabase()}, &sidb); err != nil { + if apierrors.IsNotFound(err) { + log.Info(fmt.Sprintf("SingleInstanceDatabase %s deleted.", broker.GetCurrentPrimaryDatabase())) + return err + } + return err + } + + log.Info(fmt.Sprintf("The current primary database is %v", sidb.Name)) + + // Validate if Primary Database Reference is ready + if err := validateSidbReadiness(r, broker, &sidb, ctx, req); err != nil { + log.Info("Reconcile queued") + return err + } + + log.Info(fmt.Sprintf("The current primary database %v is ready and healthy", sidb.Name)) + + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + log.Info(fmt.Sprintf("Ready pod for the sidb %v is %v", sidb.Name, sidbReadyPod.Name)) + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.RemoveDataguardConfiguration)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("RemoveDataguardConfiguration Output") + log.Info(out) + + for _, databaseRef := range broker.Status.DatabasesInDataguardConfig { + + var standbyDatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: databaseRef}, &standbyDatabase); err != nil { + if apierrors.IsNotFound(err) { + continue + } + log.Error(err, err.Error()) + return err + } + + // Set DgBrokerConfigured to false + standbyDatabase.Status.DgBroker = nil + if err := r.Status().Update(ctx, &standbyDatabase); err != nil { + r.Recorder.Eventf(&standbyDatabase, corev1.EventTypeWarning, "Updating Status", "DgBrokerConfigured status updation failed") + log.Info(fmt.Sprintf("Status updation for sidb %s failed", standbyDatabase.Name)) + return err + } + } + + log.Info("Successfully cleaned up Dataguard 
Broker") + return nil +} + +// ##################################################################################################### +// +// Validate readiness of the primary singleinstancedatabase specified +// +// ##################################################################################################### +func validateSidbReadiness(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, sidb *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("validateSidbReadiness", req.NamespacedName) + + var adminPassword string + var sidbReadyPod corev1.Pod + + // Check if current primary singleinstancedatabase is "ready" + if sidb.Status.Status != dbcommons.StatusReady { + return ErrCurrentPrimaryDatabaseNotReady + } + + // ## FETCH THE SIDB REPLICAS . + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + if sidbReadyPod.Name == "" { + log.Info("No ready pod avail for the singleinstancedatabase") + return ErrCurrentPrimaryDatabaseNotReady + } + + log.Info(fmt.Sprintf("Ready pod for the singleInstanceDatabase %s is %s", sidb.Name, sidbReadyPod.Name)) + + // Validate databaseRef Admin Password + var adminPasswordSecret corev1.Secret + err = r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + //m.Status.Status = dbcommons.StatusError + eventReason := "Waiting" + eventMsg := "waiting for : " + sidb.Spec.AdminPassword.SecretName + " to get created" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + r.Log.Info("Secret " + sidb.Spec.AdminPassword.SecretName + " Not Found") + return fmt.Errorf("adminPassword secret for singleinstancedatabase %v not found", sidb.Name) + } + log.Error(err, err.Error()) + return err 
+ } + adminPassword = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", fmt.Sprintf(dbcommons.ValidateAdminPassword, adminPassword), dbcommons.GetSqlClient(sidb.Spec.Edition))) + if err != nil { + fastStartFailoverStatus, _ := strconv.ParseBool(broker.Status.FastStartFailover) + if strings.Contains(err.Error(), "dialing backend") && broker.Status.Status == dbcommons.StatusReady && fastStartFailoverStatus { + // Connection to the pod is failing after broker came up and running + // Might suggest disconnect or pod/vm going down + log.Info("Dialing connection error") + if err := updateReconcileStatus(r, broker, ctx, req); err != nil { + return err + } + } + log.Error(err, err.Error()) + return err + } + + if strings.Contains(out, "USER is \"SYS\"") { + log.Info("validated Admin password successfully") + } else if strings.Contains(out, "ORA-01017") { + //m.Status.Status = dbcommons.StatusError + eventReason := "Logon denied" + eventMsg := "invalid databaseRef admin password. 
secret: " + sidb.Spec.AdminPassword.SecretName + r.Recorder.Eventf(broker, corev1.EventTypeWarning, eventReason, eventMsg) + return fmt.Errorf("logon denied for singleinstancedatabase %v", sidb.Name) + } else { + return fmt.Errorf("%v", out) + } + + return nil +} + +// ############################################################################# +// +// Setup the requested dataguard Configuration +// +// ############################################################################# +func setupDataguardBrokerConfiguration(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, sidb *dbapi.SingleInstanceDatabase, + ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("setupDataguardBrokerConfiguration", req.NamespacedName) + + // Get sidb ready pod for current primary database + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + log.Info(fmt.Sprintf("broker.Spec.StandbyDatabaseRefs are %v", broker.Spec.StandbyDatabaseRefs)) + + for _, database := range broker.Spec.StandbyDatabaseRefs { + + log.Info(fmt.Sprintf("adding database %v", database)) + + // Get the standby database resource + var standbyDatabase dbapi.SingleInstanceDatabase + err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: database}, &standbyDatabase) + if err != nil { + if apierrors.IsNotFound(err) { + eventReason := "Warning" + eventMsg := database + "not found" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + continue + } + log.Error(err, err.Error()) + return err + } + + // validate standby database status + if standbyDatabase.Status.Status != dbcommons.StatusReady { + eventReason := "Waiting" + eventMsg := "Waiting for " + standbyDatabase.Name + " to be Ready" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + log.Info(fmt.Sprintf("single 
instance database %s not ready yet", standbyDatabase.Name)) + continue + } + + // Check if dataguard broker is already configured for the standby database + if standbyDatabase.Status.DgBroker != nil { + log.Info("Dataguard broker for standbyDatabase : " + standbyDatabase.Name + " is already configured") + continue + } + + // Check if dataguard broker already has a database with the same SID + _, ok := broker.Status.DatabasesInDataguardConfig[strings.ToUpper(standbyDatabase.Status.Sid)] + if ok { + log.Info("A database with the same SID is already configured in the DG") + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Spec Error", "A database with the same SID "+standbyDatabase.Status.Sid+" is already configured in the DG") + continue + } + + broker.Status.Status = dbcommons.StatusCreating + r.Status().Update(ctx, broker) + + // ## FETCH THE STANDBY REPLICAS . + standbyDatabaseReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, standbyDatabase.Name, standbyDatabase.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + var adminPasswordSecret corev1.Secret + if err := r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return err + } + var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + if err := setupDataguardBrokerConfigurationForGivenDB(r, broker, sidb, &standbyDatabase, standbyDatabaseReadyPod, sidbReadyPod, ctx, req, adminPassword); err != nil { + log.Error(err, fmt.Sprintf(" Error while setting up DG broker for the Database %v:%v", standbyDatabase.Status.Sid, standbyDatabase.Name)) + return err + } + if len(broker.Status.DatabasesInDataguardConfig) == 0 { + log.Info("DatabasesInDataguardConfig is nil") + broker.Status.DatabasesInDataguardConfig = make(map[string]string) + } + log.Info(fmt.Sprintf("adding %v:%v to the map", 
standbyDatabase.Status.Sid, standbyDatabase.Name)) + broker.Status.DatabasesInDataguardConfig[standbyDatabase.Status.Sid] = standbyDatabase.Name + r.Status().Update(ctx, broker) + // Update Databases + } + if len(broker.Status.DatabasesInDataguardConfig) == 0 { + broker.Status.DatabasesInDataguardConfig = make(map[string]string) + } + log.Info(fmt.Sprintf("adding primary database %v:%v to the map", sidb.Status.Sid, sidb.Name)) + broker.Status.DatabasesInDataguardConfig[sidb.Status.Sid] = sidb.Name + + eventReason := "DG Configuration up to date" + eventMsg := "" + + // Patch DataguardBroker Service to point selector to Current Primary Name + if err := patchService(r, broker, ctx, req); err != nil { + log.Error(err, err.Error()) + return err + } + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + + return nil +} + +// ############################################################################# +// +// Set up dataguard Configuration for a given StandbyDatabase +// +// ############################################################################# +func setupDataguardBrokerConfigurationForGivenDB(r *DataguardBrokerReconciler, m *dbapi.DataguardBroker, n *dbapi.SingleInstanceDatabase, standbyDatabase *dbapi.SingleInstanceDatabase, + standbyDatabaseReadyPod corev1.Pod, sidbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request, adminPassword string) error { + + log := r.Log.WithValues("setupDataguardBrokerConfigurationForGivenDB", req.NamespacedName) + + if standbyDatabaseReadyPod.Name == "" || sidbReadyPod.Name == "" { + return errors.New("no ready Pod for the singleinstancedatabase") + } + + // ## CHECK IF DG CONFIGURATION AVAILABLE IN PRIMARY DATABSE## + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.DBShowConfigCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + 
log.Info("ShowConfiguration Output") + log.Info(out) + + if strings.Contains(out, "ORA-16525") { + log.Info("ORA-16525: The Oracle Data Guard broker is not yet available on Primary") + return fmt.Errorf("ORA-16525: The Oracle Data Guard broker is not yet available on Primary database %v", n.Name) + } + + _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DB Admin pwd file created") + + // ORA-16532: Oracle Data Guard broker configuration does not exist , so create one + if strings.Contains(out, "ORA-16532") { + if m.Spec.ProtectionMode == "MaxPerformance" { + // Construct the password file and dgbroker command file + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerMaxPerformanceCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + "dgmgrl sys@${PRIMARY_DB_CONN_STR} @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd") + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxPerformance Output") + log.Info(out) + } else if m.Spec.ProtectionMode == "MaxAvailability" { + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAX AVAILABILITY ## + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, 
dbcommons.DataguardBrokerMaxAvailabilityCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + "dgmgrl sys@${PRIMARY_DB_CONN_STR} @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd") + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxAvailability Output") + log.Info(out) + } else { + log.Info("SPECIFY correct Protection Mode . Either MaxAvailability or MaxPerformance") + return err + } + + // ## SHOW CONFIGURATION DG + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.DBShowConfigCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } else { + log.Info("ShowConfiguration Output") + log.Info(out) + } + // Set DG Configured status to true for this standbyDatabase and primary Database. so that in next reconcilation, we dont configure this again + n.Status.DgBroker = &m.Name + standbyDatabase.Status.DgBroker = &m.Name + r.Status().Update(ctx, standbyDatabase) + r.Status().Update(ctx, n) + // Remove admin pwd file + _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + dbcommons.RemoveAdminPasswordFile) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DB Admin pwd file removed") + + return err + } + + // DG Configuration Exists . 
So add the standbyDatabase to the existing DG Configuration + databases, err := GetDatabasesInDataGuardConfigurationWithRole(r, m, ctx, req) + if err != nil { + log.Info("Error while setting up the dataguard configuration") + log.Error(err, err.Error()) + return err + } + + // ## ADD DATABASE TO DG CONFIG , IF NOT PRESENT + found, _ := dbcommons.IsDatabaseFound(standbyDatabase.Spec.Sid, databases, "") + if found { + return err + } + primarySid := dbcommons.GetPrimaryDatabase(databases) + + // If user adds a new standby to a dg config when failover happened to one ot the standbys, we need to have current primary connect string + primaryConnectString := n.Name + ":1521/" + primarySid + if !strings.EqualFold(primarySid, n.Spec.Sid) { + primaryConnectString = m.Status.DatabasesInDataguardConfig[strings.ToUpper(primarySid)] + ":1521/" + primarySid + } + + if m.Spec.ProtectionMode == "MaxPerformance" { + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerAddDBMaxPerformanceCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("dgmgrl sys@%s @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd ", primaryConnectString)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxPerformance Output") + log.Info(out) + + } else if m.Spec.ProtectionMode == "MaxAvailability" { + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAX AVAILABILITY ## + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, 
false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerAddDBMaxAvailabilityCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("dgmgrl sys@%s @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd ", primaryConnectString)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxAvailability Output") + log.Info(out) + + } else { + log.Info("SPECIFY correct Protection Mode . Either MaxAvailability or MaxPerformance") + log.Error(err, err.Error()) + return err + } + + // Remove admin pwd file + _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + dbcommons.RemoveAdminPasswordFile) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DB Admin pwd file removed") + + // Set DG Configured status to true for this standbyDatabase. 
so that in next reconcilation, we dont configure this again + standbyDatabase.Status.DgBroker = &m.Name + r.Status().Update(ctx, standbyDatabase) + + return nil +} + +// ########################################################################################################### +// +// Patch the service for dataguardbroker resource to point selector to current Primary Name +// +// ########################################################################################################### +func patchService(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + log := r.Log.WithValues("patchService", req.NamespacedName) + + primaryDatabaseRef := broker.Status.DatabasesInDataguardConfig[broker.Status.PrimaryDatabase] + var svc *corev1.Service = &corev1.Service{} + + // fetch the k8s service for the dataguardbroker resource + err := r.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, svc) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("Patching Service %s to point to the currPrimaryDatabase %s", svc.Name, primaryDatabaseRef)) + + // updating service selector for the primary database pod to attach itself to the service + svc.Spec.Selector["app"] = primaryDatabaseRef + if err = r.Update(ctx, svc); err != nil { + return err + } + log.Info(fmt.Sprintf("Patching service %s successful ", svc.Name)) + + // updating the dataguardbroker resource connect strings + broker.Status.ClusterConnectString = svc.Name + "." 
+ svc.Namespace + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" + if broker.Spec.LoadBalancer { + if len(svc.Status.LoadBalancer.Ingress) > 0 { + lbAddress := svc.Status.LoadBalancer.Ingress[0].Hostname + if lbAddress == "" { + lbAddress = svc.Status.LoadBalancer.Ingress[0].IP + } + broker.Status.ExternalConnectString = lbAddress + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" + } + } else { + nodeip := dbcommons.GetNodeIp(r, ctx, req) + if nodeip != "" { + broker.Status.ExternalConnectString = nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/DATAGUARD" + } + } + log.Info("Updated connect strings to the dataguard broker") + return nil +} + +// ########################################################################################################### +// +// Update Reconcile Status +// +// ########################################################################################################### +func updateReconcileStatus(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (err error) { + + log := r.Log.WithValues("updateReconcileStatus", req.NamespacedName) + + // fetch the singleinstancedatabase (database sid) and their role in the dataguard configuration + var databases []string + databases, err = GetDatabasesInDataGuardConfigurationWithRole(r, broker, ctx, req) + if err != nil { + log.Info("Problem when retrieving the databases in dg config") + broker.Status.Status = dbcommons.StatusNotReady + r.Status().Update(ctx, broker) + return nil + } + + // loop over all the databases to update the status of the dataguardbroker and the singleinstancedatabase + var standbyDatabases string = "" + for i := 0; i < len(databases); i++ { + splitstr := strings.Split(databases[i], ":") + database := strings.ToUpper(splitstr[0]) + var singleInstanceDatabase dbapi.SingleInstanceDatabase + err := r.Get(ctx, types.NamespacedName{Name: broker.Status.DatabasesInDataguardConfig[database], Namespace: 
// #####################################################################################################
//
//	GetFSFOTargets returns the comma-separated list of fast-start failover
//	target SIDs for databaseSid: every other SID present in the given
//	dataguard configuration map (order follows Go map iteration and is
//	therefore unspecified). Returns an error when databaseSid itself is not
//	a key of the map; an empty string (no error) when it is the only entry.
//
// #####################################################################################################
func GetFSFOTargets(databaseSid string, databasesInDgConfig map[string]string) (string, error) {
	if _, ok := databasesInDgConfig[databaseSid]; !ok {
		// BUG FIX: the message previously interpolated databasesInDgConfig[databaseSid],
		// which is always "" on this path; report the missing key itself.
		return "", fmt.Errorf("database %s not in dataguard config", databaseSid)
	}
	var fsfoTargets []string
	for dbSid := range databasesInDgConfig {
		if dbSid != databaseSid {
			fsfoTargets = append(fsfoTargets, dbSid)
		}
	}
	return strings.Join(fsfoTargets, ","), nil
}
+ return strings.Join(fsfoTarget, ","), nil +} + +// ##################################################################################################### +// +// Set faststartfailover targets accordingly to dataguard configuration +// +// ##################################################################################################### +func setFSFOTargets(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("setFSFOTargets", req.NamespacedName) + + // fetch the current primary singleinstancedatabase + var currentPrimaryDatabase dbapi.SingleInstanceDatabase + err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: broker.GetCurrentPrimaryDatabase()}, ¤tPrimaryDatabase) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("Resource not found") + return nil + } + r.Log.Error(err, err.Error()) + return err + } + + log.Info(fmt.Sprintf("current primary database for the dg config is %s", currentPrimaryDatabase.Name)) + + // fetch the singleinstancedatabase ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, currentPrimaryDatabase.Spec.Image.Version, + currentPrimaryDatabase.Spec.Image.PullFrom, currentPrimaryDatabase.Name, currentPrimaryDatabase.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return fmt.Errorf("error while fetching ready pod for %s", currentPrimaryDatabase.Name) + } + + log.Info(fmt.Sprintf("current primary database ready pod is %s", sidbReadyPod.Name)) + + // fetch singleinstancedatabase admin password + var adminPasswordSecret corev1.Secret + if err = r.Get(ctx, types.NamespacedName{Name: currentPrimaryDatabase.Spec.AdminPassword.SecretName, Namespace: currentPrimaryDatabase.Namespace}, &adminPasswordSecret); err != nil { + if apierrors.IsNotFound(err) { + //m.Status.Status = dbcommons.StatusError + eventReason := "Waiting" + eventMsg := "waiting for : " + 
currentPrimaryDatabase.Spec.AdminPassword.SecretName + " to get created" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + r.Log.Info("Secret " + currentPrimaryDatabase.Spec.AdminPassword.SecretName + " Not Found") + return errors.New("admin password secret not found") + } + log.Error(err, err.Error()) + return err + } + adminPassword := string(adminPasswordSecret.Data[currentPrimaryDatabase.Spec.AdminPassword.SecretKey]) + + for databaseSid, databaseRef := range broker.Status.DatabasesInDataguardConfig { + // construct FSFO target for this database + fsfoTargets, err := GetFSFOTargets(databaseSid, broker.Status.DatabasesInDataguardConfig) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Setting fast start failover target for the database %s to %s", databaseRef, fsfoTargets)) + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"EDIT DATABASE %s SET PROPERTY FASTSTARTFAILOVERTARGET=%s \" | dgmgrl sys/%s@%s ", + databaseSid, fsfoTargets, adminPassword, currentPrimaryDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("SETTING FSFO TARGET OUTPUT") + log.Info(out) + + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"SHOW DATABASE %s FASTSTARTFAILOVERTARGET \" | dgmgrl sys/%s@%s ", databaseSid, adminPassword, currentPrimaryDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("FSFO TARGETS OF " + databaseSid) + log.Info(out) + } + + // Set FSFO Targets according to the input yaml of broker + return nil +} + +// ############################################################################# +// +// Setup the requested dataguard configuration +// +// ############################################################################# +func createObserverPods(r 
*DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("createObserverPods", req.NamespacedName) + + // fetch the current primary singleinstancedatabase resourcce + var currPrimaryDatabase dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &currPrimaryDatabase); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", namespacedName.Name)) + r.Log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ErrCurrentPrimaryDatabaseNotFound + } + return err + } + + // fetch the dataguardbroker observer replicas + _, brokerReplicasFound, _, _, err := dbcommons.FindPods(r, "", "", broker.Name, broker.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + if brokerReplicasFound > 0 { + return nil + } + + // Stop the already running observer + // find the avail pods for the currPrimaryDatabase + log.Info("Need to stop the observer if already running") + currPrimaryDatabaseReadyPod, _, _, _, err := dbcommons.FindPods(r, "", "", currPrimaryDatabase.Name, currPrimaryDatabase.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + if currPrimaryDatabaseReadyPod.Name == "" { + return errors.New("No ready pods avail ") + } + + // fetch singleinstancedatabase admin password + var adminPasswordSecret corev1.Secret + if err = r.Get(ctx, types.NamespacedName{Name: currPrimaryDatabase.Spec.AdminPassword.SecretName, Namespace: currPrimaryDatabase.Namespace}, &adminPasswordSecret); err != nil { + if apierrors.IsNotFound(err) { + //m.Status.Status = dbcommons.StatusError + eventReason := "Waiting" + 
eventMsg := "waiting for : " + currPrimaryDatabase.Spec.AdminPassword.SecretName + " to get created" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + r.Log.Info("Secret " + currPrimaryDatabase.Spec.AdminPassword.SecretName + " Not Found") + return errors.New("admin password secret not found") + } + log.Error(err, err.Error()) + return err + } + adminPassword := string(adminPasswordSecret.Data[currPrimaryDatabase.Spec.AdminPassword.SecretKey]) + + out, err := dbcommons.ExecCommand(r, r.Config, currPrimaryDatabaseReadyPod.Name, currPrimaryDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \" STOP OBSERVER %s \" | dgmgrl sys/%s@%s ", broker.Name, adminPassword, currPrimaryDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info(out) + // instantiate observer pod specification + pod := dbcommons.NewRealPodBuilder(). + SetNamespacedName(types.NamespacedName{ + Name: broker.Name + "-" + dbcommons.GenerateRandomString(5), + Namespace: broker.Namespace, + }). + SetLabels(map[string]string{ + "app": broker.Name, + "version": currPrimaryDatabase.Spec.Image.PullSecrets, + }). + SetTerminationGracePeriodSeconds(int64(30)). + SetNodeSelector(func() map[string]string { + var nsRule map[string]string = map[string]string{} + if len(broker.Spec.NodeSelector) != 0 { + for key, value := range broker.Spec.NodeSelector { + nsRule[key] = value + } + } + return nsRule + }()). + SetSecurityContext(corev1.PodSecurityContext{ + RunAsUser: func() *int64 { i := int64(54321); return &i }(), + FSGroup: func() *int64 { i := int64(54321); return &i }(), + }). + SetImagePullSecrets(currPrimaryDatabase.Spec.Image.PullSecrets). 
+ AppendContainers(corev1.Container{ + Name: broker.Name, + Image: currPrimaryDatabase.Spec.Image.PullFrom, + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "/bin/echo -en 'shutdown abort;\n' | env ORACLE_SID=${ORACLE_SID^^} sqlplus -S / as sysdba"}, + }, + }, + }, + ImagePullPolicy: corev1.PullAlways, + Ports: []corev1.ContainerPort{{ContainerPort: 1521}, {ContainerPort: 5500}}, + + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "$ORACLE_BASE/checkDBLockStatus.sh"}, + }, + }, + InitialDelaySeconds: 20, + TimeoutSeconds: 20, + PeriodSeconds: 40, + }, + Env: []corev1.EnvVar{ + { + Name: "SVC_HOST", + Value: broker.Name, + }, + { + Name: "SVC_PORT", + Value: "1521", + }, + { + Name: "PRIMARY_DB_CONN_STR", + Value: currPrimaryDatabase.Name + ":1521/" + currPrimaryDatabase.Spec.Sid, + }, + { + Name: "DG_OBSERVER_ONLY", + Value: "true", + }, + { + Name: "DG_OBSERVER_NAME", + Value: broker.Name, + }, + { + // Sid used here only for Locking mechanism to work . + Name: "ORACLE_SID", + Value: "OBSRVR" + strings.ToUpper(currPrimaryDatabase.Spec.Sid), + }, + { + Name: "ORACLE_PWD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: currPrimaryDatabase.Spec.AdminPassword.SecretName, + }, + Key: currPrimaryDatabase.Spec.AdminPassword.SecretKey, + }, + }, + }, + }, + }). 
+ Build() + + // set the ownership and lifecyle of the observer pod to the dataguardbroker resource + ctrl.SetControllerReference(broker, &pod, r.Scheme) + + log.Info("Creating a new POD", "POD.Namespace", pod.Namespace, "POD.Name", pod.Name) + if err = r.Create(ctx, &pod); err != nil { + log.Error(err, "Failed to create new POD", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) + return err + } + + // Waiting for Pod to get created as sometimes it takes some time to create a Pod . 30 seconds TImeout + timeout := 30 + err = dbcommons.WaitForStatusChange(r, pod.Name, broker.Namespace, ctx, req, time.Duration(timeout)*time.Second, "pod", "creation") + if err != nil { + log.Error(err, "Error in Waiting for Pod status for Creation", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) + return err + } + log.Info("Succesfully Created New Pod ", "POD.NAME : ", pod.Name) + + eventReason := "SUCCESS" + eventMsg := "" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + + return nil +} + +// ############################################################################# +// +// Enable faststartfailover for the dataguard configuration +// +// ############################################################################# +func enableFSFOForDgConfig(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("enableFSFOForDgConfig", req.NamespacedName) + + // Get the current primary singleinstancedatabase resourcce + var sidb dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &sidb); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", sidb.Name)) + 
log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ErrCurrentPrimaryDatabaseNotFound + } + return err + } + + // fetch singleinstancedatabase ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + // fetch singleinstancedatabase adminpassword secret + var adminPasswordSecret corev1.Secret + if err := r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return err + } + var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Enabling FastStartFailover", fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s", broker.Name)) + log.Info(fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s", broker.Name)) + + // enable faststartfailover for the dataguard configuration + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl sys/%s@%s ", dbcommons.EnableFSFOCMD, adminPassword, sidb.Status.Sid)) + if err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Enabling FastStartFailover failed", fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s failed", broker.Name)) + log.Error(err, err.Error()) + return err + } + log.Info("EnableFastStartFailover Output") + log.Info(out) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Enabling FastStartFailover successful", fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s successful", broker.Name)) + + return nil +} + +// ############################################################################# +// +// Disable faststartfailover for the dataguard configuration +// +// 
############################################################################# +func disableFSFOForDGConfig(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("disableFSFOForDGConfig", req.NamespacedName) + + // Get the current primary singleinstancedatabase resource + var sidb dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &sidb); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", sidb.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ErrCurrentPrimaryDatabaseNotFound + } + return err + } + + // fetch singleinstancedatabase ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + // fetch admin password for the singleinstancedatabase + var adminPasswordSecret corev1.Secret + if err := r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return err + } + var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Disabling FastStartFailover", fmt.Sprintf("Disabling FastStartFailover for the dataguard broker %s", broker.Name)) + log.Info(fmt.Sprintf("Disabling FastStartFailover for the dataguard broker %s", broker.Name)) + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | 
dgmgrl sys/%s@%s ", fmt.Sprintf(dbcommons.DisableFSFOCMD, broker.Name), adminPassword, sidb.Status.Sid)) + if err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Disabling FastStartFailover failed", fmt.Sprintf("Disabling FastStartFailover for the dataguard broker %s failed", broker.Name)) + log.Error(err, err.Error()) + return err + } + log.Info("DisableFastStartFailover Output") + log.Info(out) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Disabling FastStartFailover", "faststartfailover disabled successfully") + log.Info("faststartfailover disabled successfully") + + return nil +} + +// ############################################################################# +// +// Get databases in dataguard configuration along with their roles +// +// ############################################################################# +func GetDatabasesInDataGuardConfigurationWithRole(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) ([]string, error) { + r.Log.Info(fmt.Sprintf("GetDatabasesInDataGuardConfiguration are %v", broker.GetDatabasesInDataGuardConfiguration())) + for _, database := range broker.GetDatabasesInDataGuardConfiguration() { + + var singleInstanceDatabase dbapi.SingleInstanceDatabase + if err := r.Get(context.TODO(), types.NamespacedName{Namespace: broker.Namespace, Name: database}, &singleInstanceDatabase); err != nil { + // log about the error while fetching the database + continue + } + + // Fetch the primary database ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, singleInstanceDatabase.Spec.Image.Version, + singleInstanceDatabase.Spec.Image.PullFrom, singleInstanceDatabase.Name, singleInstanceDatabase.Namespace, ctx, req) + if err != nil || sidbReadyPod.Name == "" { + continue + } + + // try out + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | sqlplus -s / 
as sysdba ", dbcommons.DataguardBrokerGetDatabaseCMD)) + if err != nil || strings.Contains(out, "no rows selected") && strings.Contains(out, "ORA-") { + continue + } + + r.Log.Info(fmt.Sprintf("sidbReadyPod is %v \n output of the exec is %v \n and output contains ORA- is %v", sidbReadyPod.Name, out, strings.Contains(out, "ORA-"))) + + out1 := strings.Replace(out, " ", "_", -1) + // filtering output and storing databses in dg configuration in "databases" slice + databases := strings.Fields(out1) + + // first 2 values in the slice will be column name(DATABASES) and a seperator(--------------) . so take the slice from position [2:] + databases = databases[2:] + return databases, nil + } + + return []string{}, errors.New("cannot get databases in dataguard configuration") +} diff --git a/controllers/dataguard/dataguardbroker_controller.go b/controllers/dataguard/dataguardbroker_controller.go new file mode 100644 index 00000000..4d7ae044 --- /dev/null +++ b/controllers/dataguard/dataguardbroker_controller.go @@ -0,0 +1,513 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "github.com/go-logr/logr" + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// DataguardBrokerReconciler reconciles a DataguardBroker object +type DataguardBrokerReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Config *rest.Config + Recorder record.EventRecorder +} + +const dataguardBrokerFinalizer = "database.oracle.com/dataguardbrokerfinalizer" + +//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers/finalizers,verbs=update +//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;persistentvolumeclaims;services,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +func (r *DataguardBrokerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("reconciler", req.NamespacedName) + + log.Info("Reconcile requested") + + // Get the dataguardbroker resource if already exists + var dataguardBroker dbapi.DataguardBroker + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, &dataguardBroker); err != nil { + if apierrors.IsNotFound(err) { + 
log.Info("Resource deleted") + return ctrl.Result{Requeue: false}, nil + } + return ctrl.Result{Requeue: false}, err + } + + // Manage dataguardbroker deletion + if !dataguardBroker.DeletionTimestamp.IsZero() { + return r.manageDataguardBrokerDeletion(&dataguardBroker, ctx, req) + } + + // initialize the dataguardbroker resource status + if dataguardBroker.Status.Status == "" { + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeNormal, "Status Initialization", "initializing status fields for the resource") + log.Info("Initializing status fields") + dataguardBroker.Status.Status = dbcommons.StatusCreating + dataguardBroker.Status.ExternalConnectString = dbcommons.ValueUnavailable + dataguardBroker.Status.ClusterConnectString = dbcommons.ValueUnavailable + dataguardBroker.Status.FastStartFailover = "false" + if len(dataguardBroker.Status.DatabasesInDataguardConfig) == 0 { + dataguardBroker.Status.DatabasesInDataguardConfig = map[string]string{} + } + } + + // Always refresh status before a reconcile + defer r.Status().Update(ctx, &dataguardBroker) + + // Manage DataguardBroker Creation + result, err := r.manageDataguardBrokerCreation(&dataguardBroker, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + if result.Requeue { + return result, nil + } + + // manage enabling and disabling faststartfailover + if dataguardBroker.Spec.FastStartFailover { + + for _, DbResource := range dataguardBroker.Status.DatabasesInDataguardConfig { + var singleInstanceDatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: DbResource}, &singleInstanceDatabase); err != nil { + return ctrl.Result{Requeue: false}, err + } + r.Log.Info("Check the role for database", "database", singleInstanceDatabase.Name, "role", singleInstanceDatabase.Status.Role) + if singleInstanceDatabase.Status.Role == "SNAPSHOT_STANDBY" { + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeWarning, "Enabling FSFO failed", "database %s 
is a snapshot database", singleInstanceDatabase.Name) + r.Log.Info("Enabling FSFO failed, one of the database is a snapshot database", "snapshot database", singleInstanceDatabase.Name) + return ctrl.Result{Requeue: true}, nil + } + } + + // set faststartfailover targets for all the singleinstancedatabases in the dataguard configuration + if err := setFSFOTargets(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // enable faststartfailover in the dataguard configuration + if err := enableFSFOForDgConfig(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // create Observer Pod + if err := createObserverPods(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // set faststartfailover status to true + dataguardBroker.Status.FastStartFailover = "true" + + } else { + + // disable faststartfailover + if err := disableFSFOForDGConfig(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // delete Observer Pod + observerReadyPod, _, _, _, err := dbcommons.FindPods(r, "", "", dataguardBroker.Name, dataguardBroker.Namespace, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + if observerReadyPod.Name != "" { + if err := r.Delete(ctx, &observerReadyPod); err != nil { + return ctrl.Result{Requeue: false}, err + } + } + + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeNormal, "Observer Deleted", "database observer pod deleted") + log.Info("database observer deleted") + + // set faststartfailover status to false + dataguardBroker.Status.FastStartFailover = "false" + } + + // manage manual switchover + if dataguardBroker.Spec.SetAsPrimaryDatabase != "" && dataguardBroker.Spec.SetAsPrimaryDatabase != dataguardBroker.Status.PrimaryDatabase { + if _, ok := dataguardBroker.Status.DatabasesInDataguardConfig[dataguardBroker.Spec.SetAsPrimaryDatabase]; !ok { + 
r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeWarning, "Cannot Switchover", fmt.Sprintf("database with SID %v not found in dataguardbroker configuration", dataguardBroker.Spec.SetAsPrimaryDatabase)) + log.Info(fmt.Sprintf("cannot perform switchover, database with SID %v not found in dataguardbroker configuration", dataguardBroker.Spec.SetAsPrimaryDatabase)) + return ctrl.Result{Requeue: false}, nil + } + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeWarning, "Manual Switchover", fmt.Sprintf("Switching over to %s database", dataguardBroker.Status.DatabasesInDataguardConfig[dataguardBroker.Spec.SetAsPrimaryDatabase])) + log.Info(fmt.Sprintf("switching over to %s database", dataguardBroker.Status.DatabasesInDataguardConfig[dataguardBroker.Spec.SetAsPrimaryDatabase])) + result, err := r.manageManualSwitchOver(dataguardBroker.Spec.SetAsPrimaryDatabase, &dataguardBroker, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + if result.Requeue { + return result, nil + } + } + + // Update Status for broker and sidb resources + if err := updateReconcileStatus(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + dataguardBroker.Status.Status = dbcommons.StatusReady + log.Info("Reconcile Completed") + + if dataguardBroker.Spec.FastStartFailover { + return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil + } else { + return ctrl.Result{Requeue: false}, nil + } +} + +// ############################################################################################################################# +// +// Manage deletion and clean up of the dataguardBroker resource +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) manageDataguardBrokerDeletion(broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := 
r.Log.WithValues("manageDataguardBrokerDeletion", req.NamespacedName) + + log.Info(fmt.Sprintf("Deleting dataguard broker %v", broker.Name)) + // Check if the DataguardBroker instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. + if controllerutil.ContainsFinalizer(broker, dataguardBrokerFinalizer) { + // Run finalization logic for dataguardBrokerFinalizer. If the + // finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. + if err := cleanupDataguardBroker(r, broker, req, ctx); err != nil { + // handle the errors + return ctrl.Result{Requeue: false}, err + } + + // Remove dataguardBrokerFinalizer. Once all finalizers have been + // removed, the object will be deleted. + controllerutil.RemoveFinalizer(broker, dataguardBrokerFinalizer) + if err := r.Update(ctx, broker); err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Updating Resource", "Error while removing resource finalizers") + log.Info("Error while removing resource finalizers") + return ctrl.Result{Requeue: false}, err + } + } + return ctrl.Result{Requeue: false}, nil +} + +// ############################################################################################################################# +// +// Manage validation of singleinstancedatabases and creation of the dataguard configuration +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) manageDataguardBrokerCreation(broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("manageDataguardBrokerCreation", req.NamespacedName) + + // Add finalizer for this dataguardbroker resource + if !controllerutil.ContainsFinalizer(broker, dataguardBrokerFinalizer) { + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Updating Resource", "Adding finalizers") + 
log.Info("Adding finalizer") + controllerutil.AddFinalizer(broker, dataguardBrokerFinalizer) + if err := r.Update(ctx, broker); err != nil { + return ctrl.Result{Requeue: false}, err + } + } + + // Check if a service for the dataguardbroker resources exists + var service corev1.Service + if err := r.Get(context.TODO(), types.NamespacedName{Name: broker.Name, Namespace: broker.Namespace}, &service); err != nil { + // check if the required service is not found then create the service + if apierrors.IsNotFound(err) { + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Creating Service", "creating service for the resource") + log.Info("creating service for the dataguardbroker resource") + + // instantiate the service specification + svc := dbcommons.NewRealServiceBuilder(). + SetName(broker.Name). + SetNamespace(broker.Namespace). + SetLabels(map[string]string{ + "app": broker.Name, + }). + SetAnnotation(func() map[string]string { + annotations := make(map[string]string) + if len(broker.Spec.ServiceAnnotations) != 0 { + for key, value := range broker.Spec.ServiceAnnotations { + annotations[key] = value + } + } + return annotations + }()). + SetPorts([]corev1.ServicePort{ + { + Name: "listener", + Port: 1521, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "xmldb", + Port: 5500, + Protocol: corev1.ProtocolTCP, + }, + }). + SetSelector(map[string]string{ + "app": broker.Name, + }). + SetType(func() corev1.ServiceType { + if broker.Spec.LoadBalancer { + return corev1.ServiceType("LoadBalancer") + } + return corev1.ServiceType("NodePort") + }()). 
+ Build() + + // Set the ownership of the service object to the dataguard broker resource object + ctrl.SetControllerReference(broker, &svc, r.Scheme) + + // create the service for dataguardbroker resource + if err = r.Create(ctx, &svc); err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Service Creation", "service creation failed") + log.Info("service creation failed") + return ctrl.Result{Requeue: false}, err + } else { + timeout := 30 + // Waiting for Service to get created as sometimes it takes some time to create a service. 30 seconds timeout + err = dbcommons.WaitForStatusChange(r, svc.Name, broker.Namespace, ctx, req, time.Duration(timeout)*time.Second, "svc", "creation") + if err != nil { + log.Error(err, "Error in Waiting for svc status for Creation", "svc.Namespace", svc.Namespace, "SVC.Name", svc.Name) + return ctrl.Result{Requeue: false}, err + } + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Service Created", fmt.Sprintf("Succesfully Created New Service %v", svc.Name)) + log.Info("Succesfully Created New Service ", "Service.Name : ", svc.Name) + } + time.Sleep(10 * time.Second) + } else { + return ctrl.Result{Requeue: false}, err + } + } + + log.Info(" ", "Found Existing Service ", service.Name) + + // validate that all the databases have only one replica + for _, databaseRef := range broker.GetDatabasesInDataGuardConfiguration() { + var singleinstancedatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Name: databaseRef, Namespace: broker.Namespace}, &singleinstancedatabase); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", singleinstancedatabase.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s not found", databaseRef)) + return ctrl.Result{Requeue: false}, nil + } + return ctrl.Result{Requeue: false}, err + } + 
if broker.Spec.FastStartFailover && singleinstancedatabase.Status.Replicas > 1 { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SIDB Not supported", "dataguardbroker doesn't support multiple replicas sidb in FastStartFailover mode") + log.Info("dataguardbroker doesn't support multiple replicas sidb in FastStartFailover mode") + broker.Status.Status = dbcommons.StatusError + return ctrl.Result{Requeue: false}, nil + } + } + + // Get the current primary singleinstancedatabase resource + var sidb dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &sidb); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", sidb.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ctrl.Result{Requeue: false}, nil + } + return ctrl.Result{Requeue: false}, err + } + if sidb.Status.Role != "PRIMARY" { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Spec Validation", fmt.Sprintf("singleInstanceDatabase %v not in primary role", sidb.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s expected to be in primary role", sidb.Name)) + log.Info("updating database status to check for possible FSFO") + if err := updateReconcileStatus(r, broker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + return ctrl.Result{Requeue: true, RequeueAfter: 60 * time.Second}, nil + } + + // validate current primary singleinstancedatabase readiness + log.Info(fmt.Sprintf("Validating readiness for singleinstancedatabase %v", sidb.Name)) + if err := validateSidbReadiness(r, broker, &sidb, ctx, req); err != nil { + if errors.Is(err, ErrCurrentPrimaryDatabaseNotReady) { + fastStartFailoverStatus, _ := 
strconv.ParseBool(broker.Status.FastStartFailover) + if broker.Status.Status != "" && fastStartFailoverStatus { + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Possible Failover", "Primary db not in ready state after setting up DG configuration") + } + if err := updateReconcileStatus(r, broker, ctx, req); err != nil { + log.Info("Error updating Dgbroker status") + } + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Waiting", err.Error()) + return ctrl.Result{Requeue: true, RequeueAfter: 60 * time.Second}, nil + } + return ctrl.Result{Requeue: false}, err + } + + // setup dataguard configuration + log.Info(fmt.Sprintf("setup Dataguard configuration for primary database %v", sidb.Name)) + if err := setupDataguardBrokerConfiguration(r, broker, &sidb, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + return ctrl.Result{Requeue: false}, nil +} + +// ############################################################################################################################# +// +// Manage manual switchover to the target database +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) manageManualSwitchOver(targetSidbSid string, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("SetAsPrimaryDatabase", req.NamespacedName) + + if _, ok := broker.Status.DatabasesInDataguardConfig[targetSidbSid]; !ok { + eventReason := "Cannot Switchover" + eventMsg := fmt.Sprintf("Database %s not a part of the dataguard configuration", targetSidbSid) + r.Recorder.Eventf(broker, corev1.EventTypeWarning, eventReason, eventMsg) + return ctrl.Result{Requeue: false}, nil + } + + // change broker status to updating to indicate manual switchover start + broker.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, broker) + + var sidb dbapi.SingleInstanceDatabase + if err 
:= r.Get(context.TODO(), types.NamespacedName{Name: broker.GetCurrentPrimaryDatabase(), Namespace: broker.Namespace}, &sidb); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // Fetch the primary database ready pod to create chk file + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + + // Fetch the target database ready pod to create chk file + targetReadyPod, _, _, _, err := dbcommons.FindPods(r, "", "", broker.Status.DatabasesInDataguardConfig[targetSidbSid], req.Namespace, + ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: broker.Status.DatabasesInDataguardConfig[targetSidbSid], Namespace: req.Namespace}}) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + + // Create a chk File so that no other pods take the lock during Switchover . + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.CreateChkFileCMD) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("Successfully Created chk file " + out) + + out, err = dbcommons.ExecCommand(r, r.Config, targetReadyPod.Name, targetReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.CreateChkFileCMD) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("Successfully Created chk file " + out) + + eventReason := "Waiting" + eventMsg := "Switchover In Progress" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + + // Get Admin password for current primary database + var adminPasswordSecret corev1.Secret + if err := r.Get(context.TODO(), types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return ctrl.Result{Requeue: false}, err + } + 
var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + // Connect to 'primarySid' db using dgmgrl and switchover to 'targetSidbSid' db to make 'targetSidbSid' db primary + _, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("DB Admin pwd file created") + + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("dgmgrl sys@%s \"SWITCHOVER TO %s\" < admin.pwd", broker.Status.PrimaryDatabase, targetSidbSid)) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("SWITCHOVER TO " + targetSidbSid + " Output") + log.Info(out) + + return ctrl.Result{Requeue: false}, nil +} + +// ############################################################################################################################# +// +// Setup the controller with the Manager +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.DataguardBroker{}). + Owns(&corev1.Pod{}). //Watch for deleted pods of DataguardBroker Owner + WithEventFilter(dbcommons.ResourceEventHandler()). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). //ReconcileHandler is never invoked concurrently with the same object. 
+ Complete(r) +} diff --git a/controllers/observability/databaseobserver_controller.go b/controllers/observability/databaseobserver_controller.go new file mode 100644 index 00000000..e17ee0b3 --- /dev/null +++ b/controllers/observability/databaseobserver_controller.go @@ -0,0 +1,502 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "context" + "errors" + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apiError "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "time" + + api "github.com/oracle/oracle-database-operator/apis/observability/v4" + constants "github.com/oracle/oracle-database-operator/commons/observability" +) + +// DatabaseObserverReconciler reconciles a DatabaseObserver object +type DatabaseObserverReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder +} + +//+kubebuilder:rbac:groups=observability.oracle.com,resources=databaseobservers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=observability.oracle.com,resources=databaseobservers/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=observability.oracle.com,resources=databaseobservers/finalizers,verbs=update +//+kubebuilder:rbac:groups=apps,resources=pods;deployments;services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=configmaps,verbs=get;list +//+kubebuilder:rbac:groups="",resources=pods;deployments;services;events,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets;configmaps,verbs=get;list 
+//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the DatabaseObserver object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile +func (r *DatabaseObserverReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + r.Log.WithName(constants.LogReconcile).Info(constants.LogCRStart, "NamespacedName", req.NamespacedName) + + // fetch databaseObserver + a := &api.DatabaseObserver{} + if e := r.Get(context.TODO(), req.NamespacedName, a); e != nil { + + // if CR is not found or does not exist then + // consider either CR has been deleted + if apiError.IsNotFound(e) { + r.Log.WithName(constants.LogReconcile).Info(constants.LogCREnd) + return ctrl.Result{}, nil + } + + r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorCRRetrieve) + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonFailedCRRetrieval, constants.EventMessageFailedCRRetrieval) + return ctrl.Result{}, e + + } + + // evaluate overall custom resource readiness at the end of the stack + defer r.validateCustomResourceReadiness(ctx, req) + + // initialize databaseObserver custom resource + if e := r.initialize(ctx, a, req); e != nil { + return ctrl.Result{}, e + } + + // validate specs + if e := r.validateSpecs(a); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterDeploymentReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonDeploymentSpecValidationFailed, + Message: 
constants.MessageExporterDeploymentSpecValidationFailed, + }) + if e := r.Status().Update(ctx, a); e != nil { + r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorStatusUpdate) + } + r.Log.WithName(constants.LogExportersDeploy).Error(e, constants.ErrorSpecValidationFailedDueToAnError) + return ctrl.Result{}, e + } + + // create resource if they do not exist + exporterDeployment := &ObservabilityDeploymentResource{} + if res, e := r.createResourceIfNotExists(exporterDeployment, a, ctx, req); e != nil { + return res, e + } + + // otherwise, check for updates on resource for any changes + if res, e := r.checkResourceForUpdates(exporterDeployment, a, ctx, req); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterDeploymentReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonResourceUpdateFailed, + Message: constants.MessageExporterResourceUpdateFailed, + }) + return res, e + } + + exporterService := &ObservabilityServiceResource{} + if res, e := r.createResourceIfNotExists(exporterService, a, ctx, req); e != nil { + return res, e + } + + // otherwise, check for updates on resource for any changes + if res, e := r.checkResourceForUpdates(exporterService, a, ctx, req); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterServiceReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonResourceUpdateFailed, + Message: constants.MessageExporterResourceUpdateFailed, + }) + return res, e + } + + exporterServiceMonitor := &ObservabilityServiceMonitorResource{} + if res, e := r.createResourceIfNotExists(exporterServiceMonitor, a, ctx, req); e != nil { + return res, e + } + + // otherwise, check for updates on resource for any changes + if res, e := r.checkResourceForUpdates(exporterServiceMonitor, a, ctx, req); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterServiceMonitorReady, + 
Status: metav1.ConditionFalse, + Reason: constants.ReasonResourceUpdateFailed, + Message: constants.MessageExporterResourceUpdateFailed, + }) + return res, e + } + + // check if deployment pods are ready + return r.validateDeploymentReadiness(a, ctx, req) +} + +// initialize method sets the initial status to PENDING, exporterConfig and sets the base condition +func (r *DatabaseObserverReconciler) initialize(ctx context.Context, a *api.DatabaseObserver, req ctrl.Request) error { + + if a.Status.Conditions == nil || len(a.Status.Conditions) == 0 { + + // set condition + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsCRAvailable, + Status: metav1.ConditionFalse, + Reason: constants.ReasonInitStart, + Message: constants.MessageCRInitializationStarted, + }) + + a.Status.Status = string(constants.StatusObservabilityPending) + a.Status.ExporterConfig = constants.UnknownValue + a.Status.Version = constants.UnknownValue + if e := r.Status().Update(ctx, a); e != nil { + r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorStatusUpdate) + return e + } + + } + + return nil +} + +// validateSpecs method checks the values and secrets passed in the spec +func (r *DatabaseObserverReconciler) validateSpecs(a *api.DatabaseObserver) error { + + // If either Vault Fields are empty, then assume a DBPassword secret is supplied. 
If the DBPassword secret not found, then error out + if a.Spec.Database.DBPassword.VaultOCID == "" || a.Spec.Database.DBPassword.VaultSecretName == "" { + dbSecret := &corev1.Secret{} + if e := r.Get(context.TODO(), types.NamespacedName{Name: a.Spec.Database.DBPassword.SecretName, Namespace: a.Namespace}, dbSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBPasswordSecretMissing) + return e + } + } + + // Does DB Connection String Secret Name actually exist + dbConnectSecret := &corev1.Secret{} + if e := r.Get(context.TODO(), types.NamespacedName{Name: a.Spec.Database.DBConnectionString.SecretName, Namespace: a.Namespace}, dbConnectSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBConnectionStringSecretMissing) + return e + } + + // Does DB User String Secret Name actually exist + dbUserSecret := &corev1.Secret{} + if e := r.Get(context.TODO(), types.NamespacedName{Name: a.Spec.Database.DBUser.SecretName, Namespace: a.Namespace}, dbUserSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBPUserSecretMissing) + return e + } + + // Does a custom configuration configmap actually exist, if provided + if configurationCMName := a.Spec.ExporterConfig.Configmap.Name; configurationCMName != "" { + configurationCM := &corev1.ConfigMap{} + if e := r.Get(context.TODO(), types.NamespacedName{Name: configurationCMName, Namespace: a.Namespace}, configurationCM); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorConfigmapMissing) + return e + } + } + + // Does DBWallet actually exist, if provided + if dbWalletSecretName := a.Spec.Database.DBWallet.SecretName; dbWalletSecretName != "" { + dbWalletSecret := &corev1.Secret{} + if e := r.Get(context.TODO(), 
types.NamespacedName{Name: dbWalletSecretName, Namespace: a.Namespace}, dbWalletSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBWalletSecretMissing) + return e + } + } + + return nil // valid, did not encounter any errors +} + +// createResourceIfNotExists method creates an ObserverResource if they have not yet been created +func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResource, a *api.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + conditionType, logger, groupVersionKind := or.identify() + + // update after + defer r.Status().Update(ctx, a) + + // generate desired object based on a.Spec + desiredObj, genErr := or.generate(a, r.Scheme) + if genErr != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionFalse, + Reason: constants.ReasonGeneralResourceGenerationFailed, + Message: constants.MessageResourceGenerationFailed, + }) + return ctrl.Result{}, genErr + } + + // if resource exists, retrieve the resource + foundObj := &unstructured.Unstructured{} + foundObj.SetGroupVersionKind(groupVersionKind) + getErr := r.Get(context.TODO(), types.NamespacedName{Name: desiredObj.GetName(), Namespace: req.Namespace}, foundObj) + + // if resource not found, create resource then return + if getErr != nil && apiError.IsNotFound(getErr) { + + if e := r.Create(context.TODO(), desiredObj); e != nil { // create + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionFalse, + Reason: constants.ReasonGeneralResourceCreationFailed, + Message: constants.MessageResourceCreationFailed, + }) + r.Log.WithName(logger).Error(e, constants.ErrorResourceCreationFailure, "ResourceName", desiredObj.GetName(), "Kind", groupVersionKind, "Namespace", req.Namespace) + return ctrl.Result{}, e + } + + // mark ready if created + 
meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionTrue, + Reason: constants.ReasonGeneralResourceCreated, + Message: constants.MessageResourceCreated, + }) + r.Log.WithName(logger).Info(constants.LogResourceCreated, "ResourceName", desiredObj.GetName(), "Kind", groupVersionKind, "Namespace", req.Namespace) + + } else if getErr != nil { // if an error occurred + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionFalse, + Reason: constants.ReasonGeneralResourceValidationFailureDueToError, + Message: constants.MessageResourceReadinessValidationFailed, + }) + r.Log.WithName(logger).Error(getErr, constants.ErrorResourceRetrievalFailureDueToAnError, "ResourceName", desiredObj.GetName(), "Kind", groupVersionKind, "Namespace", req.Namespace) + return ctrl.Result{}, getErr + + } else if getErr == nil && conditionType != constants.IsExporterDeploymentReady { // exclude deployment + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionTrue, + Reason: constants.ReasonGeneralResourceValidationCompleted, + Message: constants.MessageResourceReadinessValidated, + }) + r.Log.WithName(logger).Info(constants.LogResourceFound, "ResourceName", desiredObj.GetName(), "Kind", groupVersionKind, "Namespace", req.Namespace) + + } + + // if no other error and resource, other than Deployments, have already been created before, end validation and return + return ctrl.Result{}, nil +} + +// checkResourceForUpdates method checks the resource if it needs to be updated, updates if changes are found +func (r *DatabaseObserverReconciler) checkResourceForUpdates(or ObserverResource, a *api.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + conditionType, logName, groupVersionKind := or.identify() + + // generate desired object + dO, genErr := or.generate(a, r.Scheme) + if genErr != nil { 
+ return ctrl.Result{}, genErr + } + + // convert dO -> d + d := &unstructured.Unstructured{} + d.SetGroupVersionKind(groupVersionKind) + if e := r.Scheme.Convert(dO, d, nil); e != nil { + return ctrl.Result{}, e + } + + // declare found + // retrieve latest into f + f := &unstructured.Unstructured{} + f.SetGroupVersionKind(groupVersionKind) + if e := r.Get(context.TODO(), types.NamespacedName{Name: dO.GetName(), Namespace: req.Namespace}, f); e != nil { + return ctrl.Result{}, e + } + + // check if something changed + if !equality.Semantic.DeepDerivative(d.Object, f.Object) { + + if e := r.Update(context.TODO(), d); e != nil { + r.Log.WithName(logName).Error(e, constants.LogErrorWithResourceUpdate, "ResourceName", f.GetName(), "Kind", groupVersionKind.Kind, "Namespace", req.Namespace) + return ctrl.Result{}, e + } + + // update completed, however the pods need to be validated for readiness + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionFalse, + Reason: constants.ReasonResourceUpdated, + Message: constants.MessageExporterResourceUpdated, + }) + r.Log.WithName(logName).Info(constants.LogSuccessWithResourceUpdate, "ResourceName", f.GetName(), "Kind", groupVersionKind.Kind, "Namespace", req.Namespace) + r.Recorder.Event(a, corev1.EventTypeNormal, constants.EventReasonUpdateSucceeded, groupVersionKind.Kind+" is updated.") + r.Status().Update(ctx, a) + } + + return ctrl.Result{}, nil + +} + +// validateDeploymentReadiness method evaluates deployment readiness by checking the status of all deployment pods +func (r *DatabaseObserverReconciler) validateDeploymentReadiness(a *api.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + d := &appsv1.Deployment{} + rName := a.Name + + // update after + defer r.Status().Update(ctx, a) + + // get latest deployment + if e := r.Get(context.TODO(), types.NamespacedName{Name: rName, Namespace: a.Namespace}, d); e != nil { + 
meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterDeploymentReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonGeneralResourceValidationFailureDueToError, + Message: constants.MessageExporterDeploymentValidationFailed, + }) + return ctrl.Result{}, e + } + + // get deployment labels + cLabels := client.MatchingLabels{ + "app": a.Name, + } + + // list pods + pods := &corev1.PodList{} + if e := r.List(context.TODO(), pods, []client.ListOption{cLabels}...); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterDeploymentReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonDeploymentFailed, + Message: constants.MessageExporterDeploymentListingFailed, + }) + return ctrl.Result{}, e + } + + // check each pod phase + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodFailed { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterDeploymentReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonDeploymentFailed, + Message: constants.MessageExporterDeploymentFailed, + }) + return ctrl.Result{}, errors.New(constants.ErrorDeploymentPodsFailure) + + } else if pod.Status.Phase != corev1.PodRunning { // pod could be creating, + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterDeploymentReady, + Status: metav1.ConditionUnknown, + Reason: constants.ReasonDeploymentPending, + Message: constants.MessageExporterDeploymentPending, + }) + return ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second}, nil + } + } + + // once all pods are found to be running, mark deployment as ready and the exporter as ready + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterDeploymentReady, + Status: metav1.ConditionTrue, + Reason: constants.ReasonDeploymentSuccessful, + Message: constants.MessageExporterDeploymentSuccessful, + }) 
+ a.Status.Version = constants.GetExporterVersion(a) + a.Status.ExporterConfig = constants.GetExporterConfig(a) + return ctrl.Result{}, nil +} + +// validateCustomResourceReadiness method evaluates CR readiness by cycling through all conditions and checking for any condition with False Status +func (r *DatabaseObserverReconciler) validateCustomResourceReadiness(ctx context.Context, req ctrl.Request) { + + // get latest object + a := &api.DatabaseObserver{} + if e := r.Get(context.TODO(), req.NamespacedName, a); e != nil { + r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorCRRetrieve) + return + } + + // make update + defer r.Status().Update(ctx, a) + + if meta.IsStatusConditionPresentAndEqual(a.Status.Conditions, constants.IsExporterDeploymentReady, metav1.ConditionUnknown) { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsCRAvailable, + Status: metav1.ConditionFalse, + Reason: constants.ReasonValidationInProgress, + Message: constants.MessageCRValidationWaiting, + }) + a.Status.Status = string(constants.StatusObservabilityPending) + } else if meta.IsStatusConditionFalse(a.Status.Conditions, constants.IsExporterDeploymentReady) || + meta.IsStatusConditionFalse(a.Status.Conditions, constants.IsExporterServiceReady) || + meta.IsStatusConditionFalse(a.Status.Conditions, constants.IsExporterServiceMonitorReady) { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsCRAvailable, + Status: metav1.ConditionFalse, + Reason: constants.ReasonReadyFailed, + Message: constants.MessageCRValidationFailed, + }) + a.Status.Status = string(constants.StatusObservabilityError) + } else { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsCRAvailable, + Status: metav1.ConditionTrue, + Reason: constants.ReasonReadyValidated, + Message: constants.MessageCRValidated, + }) + a.Status.Status = string(constants.StatusObservabilityReady) + } +} + +// SetupWithManager sets 
up the controller with the Manager. +func (r *DatabaseObserverReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&api.DatabaseObserver{}). + Owns(&appsv1.Deployment{}). + Owns(&corev1.Service{}). + Complete(r) +} diff --git a/controllers/observability/databaseobserver_resource.go b/controllers/observability/databaseobserver_resource.go new file mode 100644 index 00000000..6be6f693 --- /dev/null +++ b/controllers/observability/databaseobserver_resource.go @@ -0,0 +1,199 @@ +package controllers + +import ( + api "github.com/oracle/oracle-database-operator/apis/observability/v4" + constants "github.com/oracle/oracle-database-operator/commons/observability" + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +/* +This handler file contains all the methods that +retrieve/find and create all related resources +on Kubernetes. 
+*/ + +type ObservabilityDeploymentResource struct{} +type ObservabilityServiceResource struct{} +type ObservabilityServiceMonitorResource struct{} + +type ObserverResource interface { + generate(*api.DatabaseObserver, *runtime.Scheme) (*unstructured.Unstructured, error) + identify() (string, string, schema.GroupVersionKind) +} + +func (resource *ObservabilityDeploymentResource) generate(a *api.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { + rName := a.Name + rContainerName := constants.DefaultExporterContainerName + rContainerImage := constants.GetExporterImage(a) + rArgs := constants.GetExporterArgs(a) + rCommands := constants.GetExporterCommands(a) + rVolumes := constants.GetExporterDeploymentVolumes(a) + rVolumeMounts := constants.GetExporterDeploymentVolumeMounts(a) + + rReplicas := constants.GetExporterReplicas(a) + rEnvs := constants.GetExporterEnvs(a) + + rLabels := constants.GetLabels(a, a.Spec.Exporter.Deployment.Labels) + rPodLabels := constants.GetLabels(a, a.Spec.Exporter.Deployment.DeploymentPodTemplate.Labels) + rSelector := constants.GetSelectorLabel(a) + + rDeploymentSecurityContext := constants.GetExporterDeploymentSecurityContext(a) + rPodSecurityContext := constants.GetExporterPodSecurityContext(a) + + rPort := []corev1.ContainerPort{ + {ContainerPort: constants.DefaultAppPort}, + } + + // exporterContainer + rContainers := make([]corev1.Container, 1) + rContainers[0] = corev1.Container{ + Image: rContainerImage, + ImagePullPolicy: corev1.PullAlways, + Name: rContainerName, + Env: rEnvs, + VolumeMounts: rVolumeMounts, + Ports: rPort, + Args: rArgs, + Command: rCommands, + SecurityContext: rDeploymentSecurityContext, + } + + constants.AddSidecarContainers(a, &rContainers) + constants.AddSidecarVolumes(a, &rVolumes) + + // additionalContainers + + obj := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: rName, + Namespace: a.Namespace, + Labels: rLabels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: 
&rReplicas, + Selector: &metav1.LabelSelector{ + MatchLabels: rSelector, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: rPodLabels, + }, + Spec: corev1.PodSpec{ + Containers: rContainers, + RestartPolicy: corev1.RestartPolicyAlways, + Volumes: rVolumes, + SecurityContext: rPodSecurityContext, + }, + }, + }, + } + + if err := controllerutil.SetControllerReference(a, obj, scheme); err != nil { + return nil, err + } + + var u = &unstructured.Unstructured{} + if err := scheme.Convert(obj, u, nil); err != nil { + return nil, err + } + return u, nil +} + +func (resource *ObservabilityServiceResource) generate(a *api.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { + rServiceName := a.Name + rLabels := constants.GetLabels(a, a.Spec.Exporter.Service.Labels) + rSelector := constants.GetSelectorLabel(a) + rPorts := constants.GetExporterServicePort(a) + + obj := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: rServiceName, + Labels: rLabels, + Namespace: a.Namespace, + }, + Spec: corev1.ServiceSpec{ + Type: constants.DefaultServiceType, + Selector: rSelector, + Ports: rPorts, + }, + } + + if err := controllerutil.SetControllerReference(a, obj, scheme); err != nil { + return nil, err + } + + var u = &unstructured.Unstructured{} + if err := scheme.Convert(obj, u, nil); err != nil { + return nil, err + } + return u, nil +} + +func (resource *ObservabilityServiceMonitorResource) generate(a *api.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { + rName := a.Name + rEndpoints := constants.GetEndpoints(a) + + rSelector := constants.GetSelectorLabel(a) + rLabels := constants.GetLabels(a, a.Spec.Prometheus.ServiceMonitor.Labels) + + smSpec := monitorv1.ServiceMonitorSpec{ + Endpoints: rEndpoints, + Selector: metav1.LabelSelector{ + MatchLabels: rSelector, + }, + } + constants.AddNamespaceSelector(a, &smSpec) + + obj := &monitorv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ 
+ Name: rName, + Labels: rLabels, + Namespace: a.Namespace, + }, + Spec: smSpec, + } + + // set reference + if e := controllerutil.SetControllerReference(a, obj, scheme); e != nil { + return nil, e + } + + // convert + var u = &unstructured.Unstructured{} + if e := scheme.Convert(obj, u, nil); e != nil { + return nil, e + } + + return u, nil +} + +func (resource *ObservabilityDeploymentResource) identify() (string, string, schema.GroupVersionKind) { + return constants.IsExporterDeploymentReady, constants.LogExportersDeploy, schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + } +} + +func (resource *ObservabilityServiceResource) identify() (string, string, schema.GroupVersionKind) { + return constants.IsExporterServiceReady, constants.LogExportersSVC, schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + } +} + +func (resource *ObservabilityServiceMonitorResource) identify() (string, string, schema.GroupVersionKind) { + return constants.IsExporterServiceMonitorReady, constants.LogExportersServiceMonitor, schema.GroupVersionKind{ + Group: "monitoring.coreos.com", + Version: "v1", + Kind: "ServiceMonitor", + } +} diff --git a/controllers/observability/suite_test.go b/controllers/observability/suite_test.go new file mode 100644 index 00000000..4500ff5a --- /dev/null +++ b/controllers/observability/suite_test.go @@ -0,0 +1,100 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "path/filepath" + "testing" + + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + observabilityv1alpha1 "github.com/oracle/oracle-database-operator/apis/observability/v1alpha1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = observabilityv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/doc/adb/ADB_PREREQUISITES.md b/doc/adb/ADB_PREREQUISITES.md deleted file mode 100644 index 057a8687..00000000 --- a/doc/adb/ADB_PREREQUISITES.md +++ /dev/null @@ -1,100 +0,0 @@ -# - -## Oracle Autonomous Database (ADB) Prerequisites - -Oracle Database Operator for Kubernetes must have access to OCI services. 
- -To provide access, choose **one of the following approaches**: - -* The provider uses [API Key authentication](#authorized-with-api-key-authentication) - -* The Kubernetes cluster nodes are [granted with Instance Principal](#authorized-with-instance-principal) - -### Authorized with API Key Authentication - -By default, all pods in the Oracle Container Engine for Kubernetes (OKE) are able to access the instance principal certificates, so that the operator calls OCI REST endpoints without any extra step. If you're using OKE, then please proceed to the installation. -If the operator is deployed in a third-party Kubernetes cluster, then the credentials of the Oracle Cloud Infrastructure (OCI) user are needed. The operator reads these credentials from a ConfigMap and a Secret. - -Oracle recommends using the helper script `set_ocicredentials.sh` in the root directory of the repository; This script will generate a ConfigMap and a Secret with the OCI credentials. By default, the script parses the **DEFAULT** profile in `~/.oci/config`. The default names of the ConfigMap and the Secret are, respectively: `oci-cred` and `oci-privatekey`. - -```sh -./set_ocicredentials.sh run -``` - -You can change the default values as follows: - -```sh -./set_ocicredentials.sh run -path -profile -configmap -secret -``` - -Alternatively, you can create these values manually. The ConfigMap should contain the following items: `tenancy`, `user`, `fingerprint`, `region`, `passphrase`. The Secret should contain an entry named `privatekey`. - -```sh -kubectl create configmap oci-cred \ ---from-literal=tenancy= \ ---from-literal=user= \ ---from-literal=fingerprint= \ ---from-literal=region= \ ---from-literal=passphrase=(*) - -kubectl create secret generic oci-privatekey \ ---from-file=privatekey= -``` - -> Note: passphrase is deprecated. You can ignore that line. 
- -After creating the ConfigMap and the Secret, use their names as the values of `ociConfigMap` and `ociSecret` attributes in the yaml files for provisioning, binding, and other operations. - -### Authorized with Instance Principal - -Instance principal authorization enables the operator to make API calls from an instance (that is, a node) without requiring the `ociConfigMap`, and `ociSecret` attributes in the `.yaml` file. - -> Note: Instance principal authorization applies only to instances that are running in the Oracle Cloud Infrastructure (OCI). - -To set up Instance Principle authorization: - -1. Get the `compartment OCID`: - - Log in to the cloud console, and click **Compartment**. - - ![compartment-1](/images/adb/compartment-1.png) - - Choose the compartment where the cluster creates instances, and **copy** the OCID in the details page. - - ![compartment-2](/images/adb/compartment-2.png) - -2. Create a dynamic group and matching rules: - - Go to the **Dynamic Groups** page, and click **Create Dynamic Group**. - - ![instance-principal-1](/images/adb/instance-principal-1.png) - - In the **Matching Rules** section, write the following rule. Change `compartment-OCID` to the OCID of your compartment. This rule enables all the resources, including **nodes** in the compartment, to be members of the dynamic group. - - ```sh - All {instance.compartment.id = 'compartment-OCID'} - ``` - - ![instance-principal-2](/images/adb/instance-principal-2.png) - - To apply the rules, click **Create**. - -3. Set up policies for dynamic groups: - - Go to **Policies**, and click **Create Policy**. - - ![instance-principal-3](/images/adb/instance-principal-3.png) - - This example enables the dynamic group to manage all the resources in your tenancy: - - ```sh - Allow dynamic-group to manage all-resources in tenancy - ``` - - You can also specify a particular resouce access for the dynamic group. 
This example enables the dynamic group to manage Oracle Autonomous Database in a given compartment: - - ```sh - Allow dynamic-group to manage autonomous-database-family in compartment - ``` - -At this stage, the operator has been granted sufficient permissions to call OCI services. You can now proceed to the installation. diff --git a/doc/adb/README.md b/doc/adb/README.md deleted file mode 100644 index 3055c1bc..00000000 --- a/doc/adb/README.md +++ /dev/null @@ -1,375 +0,0 @@ -# Managing Oracle Autonomous Databases with Oracle Database Operator for Kubernetes - -Before you use the Oracle Database Operator for Kubernetes (the operator), ensure your system meets all of the Oracle Autonomous Database (ADB) Prerequisites [ADB_PREREQUISITES](./ADB_PREREQUISITES.md). - -## Supported Features - -After the operator is deployed, choose either one of the following operations to create an `AutonomousDatabase` custom resource for Oracle Autonomous Database in your cluster. - -* [Provision](#provision-an-autonomous-database) an Autonomous Database -* [Bind](#bind-to-an-existing-autonomous-database) to an existing Autonomous Database - -After you create the resource, you can use the operator to perform the following tasks: - -* [Scale the OCPU core count or storage](#scale-the-ocpu-core-count-or-storage) an Autonomous Database -* [Rename](#rename) an Autonomous Database -* [Manage ADMIN database user password](#manage-admin-passsword) of an Autonomous Database -* [Download instance credentials (wallets)](#download-wallets) of an Autonomous Database -* [Stop/Start/Terminate](#stopstartterminate) an Autonomous Database -* [Delete the resource](#delete-the-resource) from the cluster - -## Provision an Autonomous Database - -Follow the steps to provision an Autonomous Database that will bind objects in your cluster. - -1. Get the `Compartment OCID`. - - Login cloud console and click `Compartment`. 
- - ![compartment-1](/images/adb/compartment-1.png) - - Click on the compartment name where you want to create your database, and **copy** the `OCID` of the compartment. - - ![compartment-2](/images/adb/compartment-2.png) - -2. Create a Kubernetes Secret to hold the password of the ADMIN user. - - You can create this secret with the following command (as an example): - - ```sh - kubectl create secret generic admin-password --from-literal=admin-password='password_here' - ``` - -3. Add the following fields to the AutonomousDatabase resource definition. An example `.yaml` file is available here: [`config/samples/autonomousdatabase_create.yaml`](./../../config/samples/autonomousdatabase_create.yaml) - | Attribute | Type | Description | Required? | - |----|----|----|----| - | `spec.details.compartmentOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Database. | Yes | - | `spec.details.dbName` | string | The database name. The name must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. | Yes | - | `spec.details.displayName` | string | The user-friendly name for the Autonomous Database. The name does not have to be unique. | Yes | - | `spec.details.cpuCoreCount` | int | The number of OCPU cores to be made available to the database. | Yes | - | `spec.details.adminPassword` | dictionary | The password for the ADMIN user. The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing.

Either `k8sSecretName` or `ociSecretOCID` must be provided. If both `k8sSecretName` and `ociSecretOCID` appear, the Operator reads the password from the K8s secret that `k8sSecretName` refers to. | Yes | - | `spec.details.adminPassword.k8sSecretName` | string | The **name** of the K8s Secret where you want to hold the password for the ADMIN user. | Conditional | - |`spec.details.adminPassword.ociSecretOCID` | string | The **[OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)** of the [OCI Secret](https://docs.oracle.com/en-us/iaas/Content/KeyManagement/Tasks/managingsecrets.htm) where you want to hold the password for the ADMIN user. | Conditional | - | `spec.details.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. | Yes | - | `spec.details.isAutoScalingEnabled` | boolean | Indicates if auto scaling is enabled for the Autonomous Database OCPU core count. The default value is `FALSE` | No | - | `spec.details.isDedicated` | boolean | True if the database is on dedicated [Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm) | No | - | `spec.details.freeformTags` | dictionary | Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tag](https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).

Example:
`freeformTags:`
    `key1: value1`
    `key2: value2`| No | - | `spec.details.dbWorkload` | string | The Oracle Autonomous Database workload type. The following values are valid:
- OLTP - indicates an Autonomous Transaction Processing database
- DW - indicates an Autonomous Data Warehouse database
- AJD - indicates an Autonomous JSON Database
- APEX - indicates an Autonomous Database with the Oracle APEX Application Development workload type. | No | - | `spec.details.dbVersion` | string | A valid Oracle Database release for Oracle Autonomous Database. | No | - | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | - | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | - | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - compartmentOCID: ocid1.compartment... - dbName: NewADB - displayName: NewADB - cpuCoreCount: 1 - adminPassword: - k8sSecretName: admin-password # use the name of the secret from step 2 - dataStorageSizeInTBs: 1 - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - -4. Apply the yaml: - - ```sh - kubectl apply -f config/samples/autonomousdatabase_create.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample created - ``` - -## Bind to an existing Autonomous Database - -Other than provisioning a database, you can bind to an existing database in your cluster. - -1. Clean up the resource you created in the earlier provision operation: - - ```sh - kubectl delete adb/autonomousdatabase-sample - autonomousdatabase.database.oracle.com/autonomousdatabase-sample deleted - ``` - -2. Copy the `Autonomous Database OCID` from Cloud Console. - - ![adb-id-1](/images/adb/adb-id-1.png) - - ![adb-id-2](/images/adb/adb-id-2.png) - -3. Add the following fields to the AutonomousDatabase resource definition. 
An example `.yaml` file is available here: [`config/samples/autonomousdatabase_bind.yaml`](./../../config/samples/autonomousdatabase_bind.yaml) - | Attribute | Type | Description | Required? | - |----|----|----|----| - | `spec.details.autonomousDatabaseOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Database you want to bind (create a reference) in your cluster. | Yes | - | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | - | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | - | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - -4. Apply the yaml. - - ```sh - kubectl apply -f config/samples/autonomousdatabase_bind.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample created - ``` - -## Scale the OCPU core count or storage - -> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes either the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. - -Users can scale up or scale down the Oracle Autonomous Database OCPU core count or storage by updating the `cpuCoreCount` and `dataStorageSizeInTBs` parameters. 
The `isAutoScalingEnabled` indicates whether auto scaling is enabled. Here is an example of scaling the CPU count and storage size (TB) up to 2 and turning off the auto-scaling by updating the `autonomousdatabase-sample` custom resource. - -1. An example YAML file is available here: [config/samples/autonomousdatabase_scale.yaml](./../../config/samples/autonomousdatabase_scale.yaml) - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - cpuCoreCount: 2 - dataStorageSizeInTBs: 2 - isAutoScalingEnabled: false - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - -2. Apply the change using `kubectl`. - - ```sh - kubectl apply -f config/samples/autonomousdatabase_scale.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured - ``` - -## Rename - -> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been completed, and the operator is authorized with API Key Authentication. - -You can rename the database by changing the values of the `dbName` and `displayName`, as follows: - -1. An example YAML file is available here: [config/samples/autonomousdatabase_rename.yaml](./../../config/samples/autonomousdatabase_rename.yaml) - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - dbName: RenamedADB - displayName: RenamedADB - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - - * `dbName`: The database name. It must begin with an alphabetic character. It can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. 
- * `displayName`: User-friendly name of the database. The name does not have to be unique. - -2. Apply the change using `kubectl`. - - ```sh - kubectl apply -f config/samples/autonomousdatabase_rename.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured - ``` - -## Manage Admin Password - -> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been completed, and the operator is authorized with API Key Authentication. - -1. Create a Kubernetes Secret to hold the new password of the ADMIN user. - - As an example, you can create this secret with the following command: * - - ```sh - kubectl create secret generic new-adb-admin-password --from-literal=new-adb-admin-password='password_here' - ``` - - \* The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing. - -2. Update the example [config/samples/autonomousdatabase_change_admin_password.yaml](./../../config/samples/autonomousdatabase_change_admin_password.yaml) - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - adminPassword: - k8sSecretName: new-admin-password - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - - * `adminPassword.k8sSecretName`: the **name** of the secret that you created in **step 1**. - -3. Apply the YAML. - - ```sh - kubectl apply -f config/samples/autonomousdatabase_change_admin_password.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured - ``` - -## Download Wallets - -> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. 
This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. - -A client Wallet is required to connect to a shared Oracle Autonomous Database. User has to provide a wallet password to download the Wallet. In the following example, the Operator will read the password from a Kubernetes Secret to download the Wallet. After that, the downloaded Wallet will be unzipped and stored as byte values in a new Kubernetes Secret `instance-wallet`. - -1. Create a Kubernetes Secret to hold the wallet password. - - As an example, you can create this secret with the following command: * - - ```sh - kubectl create secret generic instance-wallet-password --from-literal=instance-wallet-password='password_here' - ``` - - \* The password must be at least 8 characters long and must include at least 1 letter and either 1 numeric character or 1 special character. - -2. Update the example [config/samples/autonomousdatabase_wallet.yaml](./../../config/samples/autonomousdatabase_wallet.yaml) - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - wallet: - name: instance-wallet - password: - k8sSecretName: instance-wallet-password - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - - * `wallet.name`: the name of the new Secret where you want the downloaded Wallet to be stored. - * `wallet.password.k8sSecretName`: the **name** of the secret you created in **step1**. - -3. 
Apply the YAML - - ```sh - kubectl apply -f config/samples/autonomousdatabase_wallet.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured - ``` - -You should see a new Secret `instance-wallet` in your cluster: - -```sh -$ kubectl get secrets -NAME TYPE DATA AGE -oci-privatekey Opaque 1 2d12h -instance-wallet-password Opaque 1 2d12h -instance-wallet Opaque 8 2d12h -``` - -To use the secret in a deployment, refer to [Using Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets) for the examples. - -## Stop/Start/Terminate - -> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. - -Users can start/stop/terminate a database using the `lifecycleState` attribute. -Here's a list of the values you can set for `lifecycleState`: - -* `AVAILABLE`: to start the database -* `STOPPED`: to stop the database -* `TERMINATED`: to terminate the database - -1. A sample .yaml file is available here: [config/samples/autonomousdatabase_stop_start_terminate.yaml](./../../config/samples/autonomousdatabase_stop_start_terminate.yaml) - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - lifecycleState: STOPPED - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - -2. Apply the change to stop the database. - - ```sh - kubectl apply -f config/samples/autonomousdatabase_stop_start_terminate.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured - ``` - -## Delete the resource - -> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. 
This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. - -The `hardLink` defines the behavior when the resource is deleted from the cluster. If the `hardLink` is set to true, the Operator terminates the Autonomous Database in OCI when the resource is removed; otherwise, the database remains unchanged. By default the value is `false` if it is not explicitly specified. - -Follow the steps to delete the resource and terminate the Autonomous Database. - -1. Use the example [autonomousdatabase_delete_resource.yaml](./../../config/samples/autonomousdatabase_delete_resource.yaml) which sets the attribute `hardLink` to true. - - ```yaml - --- - apiVersion: database.oracle.com/v1alpha1 - kind: AutonomousDatabase - metadata: - name: autonomousdatabase-sample - spec: - details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - hardLink: true - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - ``` - -2. Apply the yaml - - ```sh - kubectl apply -f config/samples/autonomousdatabase_delete_resource.yaml - autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured - ``` - -3. Delete the resource in your cluster - - ```sh - kubectl delete adb/autonomousdatabase-sample - autonomousdatabase.database.oracle.com/autonomousdatabase-sample deleted - ``` - -Now, you can verify that the database is in TERMINATING state on the Cloud Console. diff --git a/doc/sharding/README.md b/doc/sharding/README.md deleted file mode 100644 index beb155d6..00000000 --- a/doc/sharding/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# Using Oracle Sharding with Oracle Database Operator for Kubernetes - -Oracle Sharding distributes segments of a data set across many databases (shards) on different computers, either on-premises or in cloud. Sharding enables globally distributed, linearly scalable, multimodel databases. It requires no specialized hardware or software. 
Oracle Sharding does all this while rendering the strong consistency, full power of SQL, support for structured and unstructured data, and the Oracle Database ecosystem. It meets data sovereignty requirements, and supports applications that require low latency and high availability. - -All of the shards together make up a single logical database, which is referred to as a sharded database (SDB). - -Kubernetes provides infrastructure building blocks, such as compute, storage, and networks. Kubernetes makes the infrastructure available as code. It enables rapid provisioning of multi-node topolgies. Additionally, Kubernetes also provides statefulsets, which are the workload API objects that are used to manage stateful applications. This provides us lifecycle management elasticity for databases as a stateful application for various database topologies, such as sharded databases, Oracle Real Application Clusters (Oracle RAC), single instance Oracle Database, and other Oracle features and configurations. - -The Sharding Database controller in Oracle Database Operator deploys Oracle Sharding topology as a statefulset in the Kubernetes clusters, using Oracle Database and Global Data Services Docker images. The Oracle Sharding database controller manages the typical lifecycle of Oracle Sharding topology in the Kubernetes cluster, as shown below: - -* Create primary statefulsets shards -* Create master and standby Global Data Services statefulsets -* Create persistent storage, along with statefulset -* Create services -* Create load balancer service -* Provision sharding topology by creating and configuring the following: - * Catalog database - * Shard Databases - * GSMs - * Shard scale up and scale down -* Shard topology cleanup - -The Oracle Sharding database controller provides end-to-end automation of Oracle Database sharding topology deployment in Kubernetes clusters. 
- -## Using Oracle Sharding Database Operator - -To create a Sharding Topology, complete the steps in the following sections below: - -1. [Prerequsites for running Oracle Sharding Database Controller](#prerequsites-for-running-oracle-sharding-database-controller) -2. [Provisioning Sharding Topology in a Cloud based Kubernetes Cluster (OKE in this case)](#provisioning-sharding-topology-in-a-cloud-based-kubernetes-cluster-oke-in-this-case) -3. [Connecting to Shard Databases](#connecting-to-shard-databases) -4. [Debugging and Troubleshooting](#debugging-and-troubleshooting) - -**Note** Before proceeding to the next section, you must complete the instructions given in each section, based on your enviornment, before proceeding to next section. - -## Prerequsites for Running Oracle Sharding Database Controller - -**IMPORTANT :** You must make the changes specified in this section before you proceed to the next section. - -### 1. Kubernetes Cluster: To deploy Oracle Sharding database controller with Oracle Database Operator, you need a Kubernetes Cluster which can be one of the following: - -* A Cloud-based Kubernetes cluster, such as [OCI on Container Engine for Kubernetes (OKE)](https://www.oracle.com/cloud-native/container-engine-kubernetes/) or -* An On-Premises Kubernetes Cluster, such as [Oracle Linux Cloud Native Environment (OLCNE)](https://docs.oracle.com/en/operating-systems/olcne/) cluster. - -To use Oracle Sharding Database Controller, ensure that your system is provisioned with a supported Kubernetes release. Refer to the [Release Status Section](../../README.md#release-status). - -### 2. Deploy Oracle Database Operator - -To deploy Oracle Database Operator in a Kubernetes cluster, go to the section [Quick Install of the Operator](../../README.md#oracle-database-kubernetes-operator-deployment) in the README, and complete the operator deployment before you proceed further. If you have already deployed the operator, then proceed to the next section. - -### 3. 
Oracle Database and Global Data Services Docker Images -Choose one of the following deployment options: - - **Use Oracle-Supplied Docker Images:** - The Oracle Sharding Database controller uses Oracle Global Data Services and Oracle Database images to provision the sharding topology. - - You can also download the pre-built Oracle Global Data Services `container-registry.oracle.com/database/gsm:latest` and Oracle Database images `container-registry.oracle.com/database/enterprise:latest` from [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). These images are functionally tested and evaluated with various use cases of sharding topology by deploying on OKE and OLCNE. - - - **OR** - - **Build your own Oracle Database and Global Data Services Docker Images:** - You can build these images using instructions provided on Oracle official GitHub Repositories: - * [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/docker-based-sharding-deployment/dockerfiles) - * [Oracle Database Image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance) - -After the images are ready, push them to your Docker Images Repository, so that you can pull them during Oracle Database Sharding topology provisioning. - -You can either download the images and push them to your Docker Images Repository, or, if your Kubernetes cluster can reach OCR, you can download these images directly from OCR. - -**Note**: In the sharding example yaml files, we are using GDS and database images available on [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). - -### 4. Create a namespace for the Oracle DB Sharding Setup - - Create a Kubernetes namespace named `shns`. All the resources belonging to the Oracle Database Sharding Setup will be provisioned in this namespace named `shns`. 
For example: - - ```sh - #### Create the namespace - kubectl create ns shns - - #### Check the created namespace - kubectl get ns - ``` - -### 5. Create a Kubernetes secret for the database installation owner for the database Sharding Deployment - -Create a Kubernetes secret named `db-user-pass` using these steps: [Create Kubernetes Secret](./provisioning/create_kubernetes_secret_for_db_user.md) - -After you have the above prerequsites completed, you can proceed to the next section for your environment to provision the Oracle Database Sharding Topology. - -## Provisioning Sharding Topology in a Cloud-Based Kubernetes Cluster (OKE in this case) - -Deploy Oracle Database sharding topology on your Cloud based Kubernetes cluster. In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database sharding topology. - -[1. Provisioning Oracle Database sharding topology without Database Gold Image](./provisioning/provisioning_without_db_gold_image.md) -[2. Provisioning Oracle Database sharding topology with additional control on resources like Memory and CPU allocated to Pods](./provisioning/provisioning_with_control_on_resources.md) -[3. Provisioning a Persistent Volume having an Oracle Database Gold Image](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) -[4. Provisioning Oracle Database sharding topology by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/provisioning_by_cloning_db_gold_image_in_same_ad.md) -[5. Provisioning Oracle Database sharding topology by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/provisioning_by_cloning_db_from_gold_image_across_ads.md) -[6. Provisioning Oracle Database sharding topology and send Notification using OCI Notification Service](./provisioning/provisioning_with_notification_using_oci_notification.md) -[7. 
Scale Out - Add Shards to an existing Oracle Database Sharding Topology](./provisioning/scale_out_add_shards.md) -[8. Scale In - Delete an existing Shard from a working Oracle Database sharding topology](./provisioning/scale_in_delete_an_existing_shard.md) - -## Connecting to Shard Databases - -After the Oracle Database Sharding Topology has been provisioned using the Sharding Controller in Oracle Database Kubernetes Operator, you can follow the steps in this document to connect to the Sharded Database or to the individual Shards: [Database Connectivity](./provisioning/database_connection.md) - -## Debugging and Troubleshooting - -To debug the Oracle Database Sharding Topology provisioned using the Sharding Controller of Oracle Database Kubernetes Operator, follow this document: [Debugging and troubleshooting](./provisioning/debugging.md) diff --git a/doc/sharding/provisioning/create_kubernetes_secret_for_db_user.md b/doc/sharding/provisioning/create_kubernetes_secret_for_db_user.md deleted file mode 100644 index 0d66f49b..00000000 --- a/doc/sharding/provisioning/create_kubernetes_secret_for_db_user.md +++ /dev/null @@ -1,25 +0,0 @@ -# Create kubernetes secret for db user - -Create a Kubernetes secret named "db-user-pass" using a password in a text file and then encrypt it using an `openssl` key. The text file will be removed after secret is created. 
- -```sh -mkdir /tmp/.secrets/ - -# Generate a random openssl key -openssl rand -hex 64 -out /tmp/.secrets/pwd.key - -# Use a password you want and add it to a text file -echo ORacle_21c > /tmp/.secrets/common_os_pwdfile - -# Encrypt the file with the password with the random openssl key generated above -openssl enc -aes-256-cbc -md md5 -salt -in /tmp/.secrets/common_os_pwdfile -out /tmp/.secrets/common_os_pwdfile.enc -pass file:/tmp/.secrets/pwd.key - -# Remove the password text file -rm -f /tmp/.secrets/common_os_pwdfile - -# Create the Kubernetes secret in namespace "shns" -kubectl create secret generic db-user-pass --from-file=/tmp/.secrets/common_os_pwdfile.enc --from-file=/tmp/.secrets/pwd.key -n shns - -# Check the secret details -kubectl get secret -n shns -``` diff --git a/doc/sharding/provisioning/debugging.md b/doc/sharding/provisioning/debugging.md deleted file mode 100644 index 545bf034..00000000 --- a/doc/sharding/provisioning/debugging.md +++ /dev/null @@ -1,43 +0,0 @@ -# Debugging and Troubleshooting - -When the Oracle Database Sharding Topology is provisioned using the Oracle Database Kubernetes Operator, the debugging of an issue with the deployment depends on at which stage the issue has been seen. - -Below are the possible cases and the steps to debug such an issue: - -## Failure during the provisioning of Kubernetes Pods - -In case the failure occurs during the provisioning, we need to check the status of the Kubernetes Pod which has failed to deploy. - -Use the below command to check the logs of the Pod which has a failure. For example, for failure in case of Pod `pod/catalog-0`, use below command: - -```sh -kubectl logs -f pod/catalog-0 -n shns -``` - -In case the Pod has failed to provision due to an issue with the Docker Image, you will see the error `Error: ErrImagePull` in above logs. 
- -If the Pod has not yet got initialized, use the below command to find the reason for it: - -```sh -kubectl describe pod/catalog-0 -n shns -``` - -In case the failure is related to the Cloud Infrastructure, you will need to troubleshoot that using the documentation from the cloud provider. - -## Failure in the provisioning of the Sharded Database - -In case the failure occurs after the Kubernetes Pods are created but during the execution of the scripts to create the shard databases, catalog database or the GSM, you will need to troubleshoot that at the individual Pod level. - -Initially, check the logs of the Kubernetes Pod using the command like below (change the name of the Pod with the actual Pod) - -```sh -kubectl logs -f pod/catalog-0 -n shns -``` - -To check the logs at the GSM or at the Database level or at the host level, switch to the corresponding Kubernetes container using the command like below: - -```sh -kubectl exec -it catalog-0 -n shns /bin/bash -``` - -Now, you can troubleshoot the corresponding component using the alert log or the trace files etc just like a normal Sharding Database Deployment. Please refer to [Oracle Database Sharding Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/shard/sharding-troubleshooting.html#GUID-629262E5-7910-4690-A726-A565C59BA73E) for this purpose. 
diff --git a/doc/sharding/provisioning/provisioning_by_cloning_db_gold_image_in_same_ad.md b/doc/sharding/provisioning/provisioning_by_cloning_db_gold_image_in_same_ad.md deleted file mode 100644 index 531c3839..00000000 --- a/doc/sharding/provisioning/provisioning_by_cloning_db_gold_image_in_same_ad.md +++ /dev/null @@ -1,43 +0,0 @@ -# Provisioning Oracle Database Sharding Topology by Cloning the Database from Your Own Database Gold Image in the same Availability Domain (AD) - -In this case, the database is created automatically by cloning from an existing Oracle Database Gold Image during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology is deployed using Oracle Sharding controller. - -This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. - -Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. - -**NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. - -1. Check the OCID of the Persistent Volume provisioned by above step using below command: - - ```sh - kubectl get pv -n shns - ``` - -2. 
This example uses `shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: - -* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Two sharding Pods: `shard1` and `shard2` -* One Catalog Pod: `catalog` -* Namespace: `shns` -* Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.eu-frankfurt-1.abtheljtmwcwf7liuhaibzgdcoxqcwwfpsqiqlsumrjlzkin7y4zx3x2idua` - -In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) - * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `shard_prov.yaml`. - * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../README.md#3-oracle-database-and-global-data-services-docker-images) - -Use the file: [shard_prov_clone.yaml](./shard_prov_clone.yaml) for this use case as below: - -1. Deploy the `shard_prov_clone.yaml` file: - ```sh - kubectl apply -f shard_prov_clone.yaml - ``` -2. Check the status of the deployment: - ```sh - # Check the status of the Kubernetes Pods: - kubectl get all -n shns - - # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": - kubectl logs -f pod/shard1-0 -n shns - ``` diff --git a/doc/sharding/provisioning/provisioning_with_control_on_resources.md b/doc/sharding/provisioning/provisioning_with_control_on_resources.md deleted file mode 100644 index 734cad89..00000000 --- a/doc/sharding/provisioning/provisioning_with_control_on_resources.md +++ /dev/null @@ -1,39 +0,0 @@ -# Provisioning Oracle Database Sharding Topology with Additional Control on Resources Allocated to Pods - -In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology is deployed using Oracle Sharding controller. - -This example uses `shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: - -* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Two sharding Pods: `shard1` and `shard2` -* One Catalog Pod: `catalog` -* Namespace: `shns` -* Tags `memory` and `cpu` to control the Memory and CPU of the PODs -* Additional tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level - -In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) - * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `shard_prov_memory_cpu.yaml`. 
- * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../README.md#3-oracle-database-and-global-data-services-docker-images) - -Use the YAML file [shard_prov_memory_cpu.yaml](./shard_prov_memory_cpu.yaml). - -1. Deploy the `shard_prov_memory_cpu.yaml` file: - - ```sh - kubectl apply -f shard_prov_memory_cpu.yaml - ``` - -1. Check the details of a POD. For example: To check the details of Pod `shard1-0`: - - ```sh - kubectl describe pod/shard1-0 -n shns - ``` -3. Check the status of the deployment: - ```sh - # Check the status of the Kubernetes Pods: - kubectl get all -n shns - - # Check the logs of a particular pod. For example, to check status of pod "shard1-0": - kubectl logs -f pod/shard1-0 -n shns - ``` diff --git a/doc/sharding/provisioning/provisioning_without_db_gold_image.md b/doc/sharding/provisioning/provisioning_without_db_gold_image.md deleted file mode 100644 index 0a908b52..00000000 --- a/doc/sharding/provisioning/provisioning_without_db_gold_image.md +++ /dev/null @@ -1,34 +0,0 @@ -# Provisioning Oracle Database Sharding Topology Without Database Gold Image - -In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology is deployed using Oracle Sharding controller. - -**NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
- -This example uses `shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: - -* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Two sharding Pods: `shard1` and `shard2` -* One Catalog Pod: `catalog` -* Namespace: `shns` - - -In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) - * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `shard_prov.yaml`. - * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../README.md#3-oracle-database-and-global-data-services-docker-images) - - -Use the file: [shard_prov.yaml](./shard_prov.yaml) for this use case as below: - -1. Deploy the `shard_prov.yaml` file: - ```sh - kubectl apply -f shard_prov.yaml - ``` -1. Check the status of the deployment: - ```sh - # Check the status of the Kubernetes Pods: - kubectl get all -n shns - - # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": - kubectl logs -f pod/shard1-0 -n shns - ``` diff --git a/doc/sharding/provisioning/scale_in_delete_an_existing_shard.md b/doc/sharding/provisioning/scale_in_delete_an_existing_shard.md deleted file mode 100644 index 9334741b..00000000 --- a/doc/sharding/provisioning/scale_in_delete_an_existing_shard.md +++ /dev/null @@ -1,44 +0,0 @@ -# Scale In - Delete an existing Shard From a Working Oracle Database Sharding Topology - -This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology provisioned using Oracle Database Sharding controller. - -**NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. - -In this use case, the existing database Sharding is having: - -* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` -* One Catalog Pod: `catalog` -* Namespace: `shns` - -In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) - * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `shard_prov.yaml`. - * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../README.md#3-oracle-database-and-global-data-services-docker-images) - -NOTE: Use tag `isDelete: true` to delete the shard you want. - -This use case deletes the shard `shard2` from the above Sharding Topology. 
- -Use the file: [shard_prov_delshard.yaml](./shard_prov_delshard.yaml) for this use case as below: - -1. Deploy the `shard_prov_delshard.yaml` file: - ```sh - kubectl apply -f shard_prov_delshard.yaml - ``` -2. Check the status of the deployment: - ```sh - # Check the status of the Kubernetes Pods: - kubectl get all -n shns - -**NOTE:** After you apply `shard_prov_delshard.yaml`, the change may not be visible immediately. When the shard is removed, first the chunks will be moved out of that shard that is going to be deleted. - -To monitor the chunk movement, use the following command: - -```sh -# Switch to the primary GSM Container: -kubectl exec -i -t gsm1-0 -n shns /bin/bash - -# Check the status of the chunks and repeat to observe the chunk movement: -gdsctl config chunks -``` diff --git a/doc/sharding/provisioning/scale_out_add_shards.md b/doc/sharding/provisioning/scale_out_add_shards.md deleted file mode 100644 index 174d8c6c..00000000 --- a/doc/sharding/provisioning/scale_out_add_shards.md +++ /dev/null @@ -1,31 +0,0 @@ -# Scale Out - Add Shards to an existing Oracle Database Sharding Topology - -This use case demonstrates adding a new shard to an existing Oracle Database sharding topology provisioned earlier using Oracle Database Sharding controller. - -In this use case, the existing Oracle Database sharding topology is having: - -* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Two sharding Pods: `shard1` and `shard2` -* One Catalog Pod: `catalog` -* Namespace: `shns` - -In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) - * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
- * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `shard_prov.yaml`. - * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../README.md#3-oracle-database-and-global-data-services-docker-images) - -This use case adds three new shards `shard3`,`shard4`,`shard5` to above Sharding Topology. - -Use the file: [shard_prov_extshard.yaml](./shard_prov_extshard.yaml) for this use case as below: - -1. Deploy the `shard_prov_extshard.yaml` file: - ```sh - kubectl apply -f shard_prov_extshard.yaml - ``` -2. Check the status of the deployment: - ```sh - # Check the status of the Kubernetes Pods: - kubectl get all -n shns - - # Check the logs of a particular pod. For example, to check status of pod "shard3-0": - kubectl logs -f pod/shard3-0 -n shns diff --git a/doc/sharding/provisioning/shard_prov.yaml b/doc/sharding/provisioning/shard_prov.yaml deleted file mode 100644 index 508192d7..00000000 --- a/doc/sharding/provisioning/shard_prov.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: ShardingDatabase -metadata: - name: shardingdatabase-sample - namespace: shns -spec: - shard: - - name: shard1 - storageSizeInGb: 50 - - name: shard2 - storageSizeInGb: 50 - catalog: - - name: catalog - storageSizeInGb: 50 - gsm: - - name: gsm1 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - - name: gsm2 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest - dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest - gsmImagePullSecret: ocr-reg-cred - scriptsLocation: "set -ex;curl https://codeload.github.com/oracle/db-sharding/tar.gz/master | tar -xz --strip=4 db-sharding-master/docker-based-sharding-deployment/dockerfiles/21.3.0/scripts; cp -i -r scripts/* /opt/oracle/scripts/sharding/scripts/;cp -i -r scripts/*py /opt/oracle/scripts/sharding" - secret: db-user-pass - isExternalSvc: false - isDeleteOraPvc: True - namespace: shns - diff --git a/doc/sharding/provisioning/shard_prov_clone.yaml b/doc/sharding/provisioning/shard_prov_clone.yaml deleted file mode 100644 index 0acc0ec5..00000000 --- a/doc/sharding/provisioning/shard_prov_clone.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: ShardingDatabase -metadata: - name: shardingdatabase-sample - namespace: shns -spec: - shard: - - name: shard1 - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.eu-frankfurt-1.abtheljtmwcwf7liuhaibzgdcoxqcwwfpsqiqlsumrjlzkin7y4zx3x2idua - - name: shard2 - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.eu-frankfurt-1.abtheljtmwcwf7liuhaibzgdcoxqcwwfpsqiqlsumrjlzkin7y4zx3x2idua - catalog: - - name: catalog - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.eu-frankfurt-1.abtheljtmwcwf7liuhaibzgdcoxqcwwfpsqiqlsumrjlzkin7y4zx3x2idua - gsm: - - name: gsm1 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - - name: gsm2 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest - dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest - gsmImagePullSecret: ocr-reg-cred - scriptsLocation: "set -ex;curl 
https://codeload.github.com/oracle/db-sharding/tar.gz/master | tar -xz --strip=4 db-sharding-master/docker-based-sharding-deployment/dockerfiles/21.3.0/scripts; cp -i -r scripts/* /opt/oracle/scripts/sharding/scripts/;cp -i -r scripts/*py /opt/oracle/scripts/sharding" - secret: db-user-pass - isExternalSvc: false - isClone: True - isDeleteOraPvc: True - namespace: shns diff --git a/doc/sharding/provisioning/shard_prov_clone_across_ads.yaml b/doc/sharding/provisioning/shard_prov_clone_across_ads.yaml deleted file mode 100644 index 98d9ee56..00000000 --- a/doc/sharding/provisioning/shard_prov_clone_across_ads.yaml +++ /dev/null @@ -1,72 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: ShardingDatabase -metadata: - name: shardingdatabase-sample - namespace: shns -spec: - shard: - - name: shard1 - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea - - name: shard2 - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea - catalog: - - name: catalog - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: 
ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea - gsm: - - name: gsm1 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - - name: gsm2 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-3" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-3" - storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest - dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest - gsmImagePullSecret: ocr-reg-cred - scriptsLocation: "set -ex;curl https://codeload.github.com/oracle/db-sharding/tar.gz/master | tar -xz --strip=4 db-sharding-master/docker-based-sharding-deployment/dockerfiles/21.3.0/scripts; cp -i -r scripts/* /opt/oracle/scripts/sharding/scripts/;cp -i -r scripts/*py /opt/oracle/scripts/sharding" - secret: db-user-pass - isExternalSvc: false - isClone: True - isDeleteOraPvc: True - namespace: shns diff --git a/doc/sharding/provisioning/shard_prov_delshard.yaml b/doc/sharding/provisioning/shard_prov_delshard.yaml deleted file mode 100644 index 91dfdd28..00000000 --- a/doc/sharding/provisioning/shard_prov_delshard.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: ShardingDatabase -metadata: - name: shardingdatabase-sample - namespace: shns -spec: - shard: - - name: shard1 - storageSizeInGb: 50 - - name: shard2 - storageSizeInGb: 50 - isDelete: true - - name: shard3 - storageSizeInGb: 50 - - name: shard4 - storageSizeInGb: 50 - - name: shard5 - storageSizeInGb: 50 - catalog: - - name: catalog - storageSizeInGb: 50 - gsm: - - name: gsm1 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - - name: gsm2 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest - dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest - gsmImagePullSecret: ocr-reg-cred - scriptsLocation: "set -ex;curl https://codeload.github.com/oracle/db-sharding/tar.gz/master | tar -xz --strip=4 db-sharding-master/docker-based-sharding-deployment/dockerfiles/21.3.0/scripts; cp -i -r scripts/* /opt/oracle/scripts/sharding/scripts/;cp -i -r scripts/*py /opt/oracle/scripts/sharding" - secret: db-user-pass - isExternalSvc: false - isDeleteOraPvc: True - namespace: shns diff --git a/doc/sharding/provisioning/shard_prov_extshard.yaml b/doc/sharding/provisioning/shard_prov_extshard.yaml deleted file mode 100644 index 953fe23f..00000000 --- a/doc/sharding/provisioning/shard_prov_extshard.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: ShardingDatabase -metadata: - name: shardingdatabase-sample - namespace: shns -spec: - shard: - - name: shard1 - storageSizeInGb: 50 - - name: shard2 - storageSizeInGb: 50 - - name: shard3 - storageSizeInGb: 50 - - name: shard4 - storageSizeInGb: 50 - - name: shard5 - storageSizeInGb: 50 - catalog: - - name: catalog - storageSizeInGb: 50 - gsm: - - name: gsm1 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - - name: gsm2 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest - dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest - gsmImagePullSecret: ocr-reg-cred - scriptsLocation: "set -ex;curl https://codeload.github.com/oracle/db-sharding/tar.gz/master | tar -xz --strip=4 db-sharding-master/docker-based-sharding-deployment/dockerfiles/21.3.0/scripts; cp -i -r scripts/* /opt/oracle/scripts/sharding/scripts/;cp -i -r scripts/*py /opt/oracle/scripts/sharding" - secret: db-user-pass - isExternalSvc: false - isDeleteOraPvc: True - namespace: shns diff --git a/doc/sharding/provisioning/shard_prov_send_notification.yaml b/doc/sharding/provisioning/shard_prov_send_notification.yaml deleted file mode 100644 index af723c8e..00000000 --- a/doc/sharding/provisioning/shard_prov_send_notification.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# -# Copyright (c) 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: ShardingDatabase -metadata: - name: shardingdatabase-sample - namespace: shns -spec: - shard: - - name: shard1 - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea - - name: shard2 - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-1" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea - catalog: - - name: catalog - storageSizeInGb: 50 - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvAnnotations: - volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea - gsm: - - name: gsm1 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - pvMatchLabels: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-2" - - name: gsm2 - storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" - nodeSelector: - "failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-3" - pvMatchLabels: - 
"failure-domain.beta.kubernetes.io/zone": "EU-FRANKFURT-1-AD-3" - storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest - dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest - gsmImagePullSecret: ocr-reg-cred - scriptsLocation: "set -ex;curl https://codeload.github.com/oracle/db-sharding/tar.gz/master | tar -xz --strip=4 db-sharding-master/docker-based-sharding-deployment/dockerfiles/21.3.0/scripts; cp -i -r scripts/* /opt/oracle/scripts/sharding/scripts/;cp -i -r scripts/*py /opt/oracle/scripts/sharding" - secret: db-user-pass - isExternalSvc: false - isClone: True - isDeleteOraPvc: True - namespace: shns - nsConfigMap: onsconfigmap - nsSecret: my-secret - diff --git a/doc/sidb/README.md b/doc/sidb/README.md deleted file mode 100644 index 92dea684..00000000 --- a/doc/sidb/README.md +++ /dev/null @@ -1,264 +0,0 @@ -# Managing Oracle Single Instance Databases with Oracle Database Operator for Kubernetes - -Oracle Database Operator for Kubernetes (the operator) includes the Single Instance Database Controller that enables provisioning, cloning, and patching of Oracle Single Instance Databases on Kubernetes. The following sections explain the setup and functionality of the operator - -* [Prerequisites](#prerequisites) -* [Kind SingleInstanceDatabase Resource](#kind-singleinstancedatabase-resource) -* [Provision New Database](#provision-new-database) -* [Clone Existing Database](#clone-existing-database) -* [Patch/Rollback Database](#patchrollback-database) - - -## Prerequisites - -Oracle strongly recommends that you follow the [Prerequisites](./SIDB_PREREQUISITES.md). 
- -## Kind SingleInstanceDatabase Resource - - The Oracle Database Operator creates the SingleInstanceDatabase kind as a custom resource that enables Oracle Database to be managed as a native Kubernetes object - -* ### SingleInstanceDatabase Sample YAML - - For the use cases detailed below a sample .yaml file is available at - * Enterprise, Standard Editions - [config/samples/singleinstancedatabase.yaml](./../../config/samples/singleinstancedatabase.yaml) - - **Note:** The `adminPassword` field of the above `singleinstancedatabase.yaml` yaml contains a secret for Single Instance Database creation (Provisioning a new database or cloning an existing database). This secret gets deleted after the database pod becomes ready for security reasons. - -* ### List Databases - - ```sh - $ kubectl get singleinstancedatabases -o name - - singleinstancedatabase.database.oracle.com/sidb-sample - singleinstancedatabase.database.oracle.com/sidb-sample-clone - - ``` - -* ### Quick Status - - ```sh - $ kubectl get singleinstancedatabase sidb-sample - - NAME EDITION STATUS ROLE VERSION CLUSTER CONNECT STR CONNECT STR OEM EXPRESS URL - sidb-sample Enterprise Healthy PRIMARY 19.3.0.0.0 (29517242) sidb-sample.default:1521/ORCL1 144.25.10.119:1521/ORCL https://144.25.10.119:5500/em - ``` - -* ### Detailed Status - - ```sh - $ kubectl describe singleinstancedatabase sidb-sample-clone - - Name: sidb-sample-clone - Namespace: default - Labels: - Annotations: - API Version: database.oracle.com/v1alpha1 - Kind: SingleInstanceDatabase - Metadata: .... - Spec: .... 
- Status: - Cluster Connect String: sidb-sample-clone.default:1521/ORCL1C - Conditions: - Last Transition Time: 2021-06-29T15:45:33Z - Message: Waiting for database to be ready - Observed Generation: 2 - Reason: LastReconcileCycleQueued - Status: True - Type: ReconcileQueued - Last Transition Time: 2021-06-30T11:07:56Z - Message: processing datapatch execution - Observed Generation: 3 - Reason: LastReconcileCycleBlocked - Status: True - Type: ReconcileBlocked - Last Transition Time: 2021-06-30T11:16:58Z - Message: no reconcile errors - Observed Generation: 3 - Reason: LastReconcileCycleCompleted - Status: True - Type: ReconcileComplete - Connect String: 144.25.10.119:1521/ORCL1C - Datafiles Created: true - Datafiles Patched: true - Edition: Enterprise - Flash Back: true - Force Log: false - Oem Express URL: https://144.25.10.119:5500/em - Pdb Name: orclpdb1 - Release Update: 19.11.0.0.0 (32545013) - Replicas: 2 - Role: PRIMARY - Sid: ORCL1C - Status: Healthy - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Database Pending 35m (x2 over 35m) SingleInstanceDatabase Waiting for database pod to be ready - Normal Database Creating 27m (x24 over 34m) SingleInstanceDatabase Waiting for database to be ready - Normal Database Ready 22m SingleInstanceDatabase database open on pod sidb-sample-clone-133ol scheduled on node 10.0.10.6 - Normal Datapatch Pending 21m SingleInstanceDatabase datapatch execution pending - Normal Datapatch Executing 20m SingleInstanceDatabase datapatch begin execution - Normal Datapatch Done 8s SingleInstanceDatabase Datapatch from 19.3.0.0.0 to 19.11.0.0.0 : SUCCESS - - ``` - -## Provision New Database - - Provision a new database instance by specifying appropriate values for the attributes in the the example `.yaml` file, and running the following command: - - ```sh - $ kubectl create -f singleinstancedatabase.yaml - - singleinstancedatabase.database.oracle.com/sidb-sample created - ``` - -* ### Creation Status - - 
Creating a new database instance takes a while. When the 'status' status returns the response "Healthy", the Database is open for connections. - - ```sh -$ kubectl get singleinstancedatabase/sidb-sample --template={{.status.status}} - - Healthy -``` - - - -* ### Connection Information - - External and internal (running in Kubernetes pods) clients can connect to the database using .status.connectString and .status.clusterConnectString - respectively in the following command - - ```sh - $ kubectl get singleinstancedatabase/sidb-sample --template={{.status.connectString}} - - 144.25.10.119:1521/ORCL - ``` - - The Oracle Database inside the container also has Oracle Enterprise Manager Express configured. To access OEM Express, start the browser and follow the URL: - - ```sh - $ kubectl get singleinstancedatabase/sidb-sample --template={{.status.oemExpressUrl}} - - https://144.25.10.119:5500/em - ``` - -* ### Update Database Config - - The following database parameters can be updated post database creation: flashBack, archiveLog, forceLog. Change their attribute values and apply using - kubectl apply or edit/patch commands . Enable archiveLog before turning ON flashBack . Turn OFF flashBack before disabling the archiveLog - - ```sh - $ kubectl --type merge -p '{"spec":{"forceLog": true}}' patch singleinstancedatabase/sidb-sample - - singleinstancedatabase.database.oracle.com/sidb-sample patched - ``` - -* #### Database Config Status - - Check the Database Config Status using the following command - - ```sh - $ kubectl get singleinstancedatabase sidb-sample -o "jsonpath=[{.status.archiveLog}, {.status.flashBack}, {.status.forceLog}]" - - [true, true, true] - ``` - -* ### Update Initialization Parameters - - The following database initialization parameters can be updated post database creation: `sgaTarget, pgaAggregateTarget, cpuCount, processes`. Change their attribute values and apply using kubectl apply or edit/patch commands. 
- - **NOTE** - * `sgaTarget` should be in range [sga_min_size, sga_max_size], else initialization parameter `sga_target` would not be updated to specified `sgaTarget`. - -* ### Multiple Replicas - - Multiple database pod replicas can be provisioned when the persistent volume access mode is ReadWriteMany. Database is open and mounted by one of the replicas. Other replicas will have instance started but not mounted and serve to provide quick cold fail-over in case the active pod dies. Update the replica attribute in the .yaml and apply using the kubectl apply command or edit/patch commands - - Note: This functionality requires the [K8s extension](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/k8s) - Pre-built images from container-registry.oracle.com include the K8s extension - -* ### Patch Attributes - - The following attributes cannot be patched post SingleInstanceDatabase instance Creation : sid, edition, charset, pdbName, cloneFrom. - - ```sh - $ kubectl --type=merge -p '{"spec":{"sid":"ORCL1"}}' patch singleinstancedatabase sidb-sample - - The SingleInstanceDatabase "sidb-sample" is invalid: spec.sid: Forbidden: cannot be changed - ``` - -* #### Patch Persistence Volume Claim - - Persistence Volume Claim (PVC) can be patched post SingleInstanceDatabase instance Creation . This will **delete all the database pods, PVC** and new database pods are created using the new PVC . - - ```sh - $ kubectl --type=merge -p '{"spec":{"persistence":{"accessMode":"ReadWriteMany","size":"110Gi","storageClass":""}}}' patch singleinstancedatabase sidb-sample - - singleinstancedatabase.database.oracle.com/sidb-sample patched - ``` - -* #### Patch Service - - Service can be patched post SingleInstanceDatabase instance Creation . This will **replace the Service with a new type** . 
- * NodePort - '{"spec":{"loadBalancer": false}}' - * LoadBalancer - '{"spec":{"loadBalancer": true }}' - - ```sh - $ kubectl --type=merge -p '{"spec":{"loadBalancer": false}}' patch singleinstancedatabase sidb-sample - - singleinstancedatabase.database.oracle.com/sidb-sample patched - ``` - -## Clone Existing Database - - Quickly create copies of your existing database using this cloning functionality. A cloned database is an exact, block-for-block copy of the source database. - This is much faster than creating a fresh new database and copying over the data. - - To clone, specify the source database reference as value for the cloneFrom attribute in the sample .yaml. - The source database must have archiveLog mode set to true. - - ```sh - $ grep 'cloneFrom:' singleinstancedatabase.yaml - - cloneFrom: "sidb-sample" - - $ kubectl create -f singleinstancedatabase.yaml - - singleinstancedatabase.database.oracle.com/sidb-sample-clone created - ``` - - Note: The clone database can specify a database image different from the source database. In such cases, cloning is supported only between databases of the same major release. - -## Patch/Rollback Database - - Databases running in your cluster and managed by this operator can be patched or rolled back between release updates of the same major release. To patch databases, specify an image of the higher release update, and to roll back, specify an image of the lower release update. - - Patched Oracle Docker images can be built using this [patching extension](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/patching) - -* ### In Place Resources or Objects - - Edit the `.yaml` file of the database resourece/object and specify a new release update for release or image attributes. The database pods will be restarted - with the new release update image. For minimum downtime, ensure that you have mutiple replicas of the database pods running. 
- -* ### Out of Place - - Clone your source database using the method of [cloning existing database](README.md#clone-existing-database) and specify a new release version/image for the - cloned database. Use this method to enusure there are no patching related issues impacting your database performance/functionality - -* ### Datapatch status - - Patching/Rollback operations are complete when the datapatch tool completes patching or rollback of the data files. Check the data files patching status - and current release update version using the following commands - - ```sh - $ kubectl get singleinstancedatabase/sidb-sample --template={{.status.datafilesPatched}} - - true - - $ kubectl get singleinstancedatabase/sidb-sample --template={{.status.releaseUpdate} - - 19.3.0.0.0 (29517242) - ``` - diff --git a/doc/sidb/SIDB_PREREQUISITES.md b/doc/sidb/SIDB_PREREQUISITES.md deleted file mode 100644 index 43da0127..00000000 --- a/doc/sidb/SIDB_PREREQUISITES.md +++ /dev/null @@ -1,14 +0,0 @@ -## Prerequisites for Oracle Docker Image Deployment -To deploy Oracle Database Operator for Kubernetes on Oracle Docker images, complete these steps. - -* ### Prepare Oracle Docker Images - - Build SingleInstanceDatabase Docker Images from source, following the instructions at [https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance), or - use the pre-built images available at [https://container-registry.oracle.com](https://container-registry.oracle.com) - - Oracle Database Releases Supported: Oracle Database 19c Enterprise Edition or Standard Edition, and later releases. - -* ### Set Up Kubernetes and Volumes - - Set up an on-premises Kubernetes cluster, or subscribe to a managed Kubernetes service, such as Oracle Cloud Infrastructure Container Engine for Kubernetes, configured with persistent volumes. The persistent volumes are required for storage of the database files. 
- diff --git a/docs/adb/ACD.md b/docs/adb/ACD.md new file mode 100644 index 00000000..81ee1a65 --- /dev/null +++ b/docs/adb/ACD.md @@ -0,0 +1,239 @@ +# Managing Oracle Autonomous Container Databases on Dedicated Exadata Infrastructure + +Oracle Database Operator for Kubernetes (`OraOperator`) includes the Oracle Autonomous Container Database Controller. Autonomous Container Database is one of the resources of Oracle Autonomous Database dedicated Exadata infrastructure feature. You can create multiple Autonomous Container Database resources in a single Autonomous Exadata VM Cluster resource, but you must create at least one before you can create any Autonomous Databases. + +Before you use the Oracle Database Operator for Kubernetes (the operator), ensure your system meets all of the Oracle Autonomous Database (ADB) Prerequisites [ADB_PREREQUISITES](./../adb/ADB_PREREQUISITES.md). + +As indicated in the prerequisites (see above), to interact with OCI services, either the cluster has to be authorized using Principal Instance, or using the API Key Authentication by specifying the configMap and the secret under the `ociConfig` field. + +## Required Permissions + +The operator must be given the required type of access in a policy written by an administrator to manage the Autonomous Container Databases. See [Create an Autonomous Container Database](https://docs.oracle.com/en-us/iaas/autonomous-database/doc/create-acd.html) for the required policies. + +The permission to view the workrequests is also required, so that the operator will update the resources when the work is done. See [Viewing Work Requests](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengviewingworkrequests.htm#contengviewingworkrequests) for sample work request policies. + +## Supported Features + +After the operator is deployed, choose one of the following operations to create an `AutonomousContainerDatabase` custom resource for Oracle Autonomous Container Database in your cluster. 
+ +* [Provision](#provision-an-autonomous-container-database) an Autonomous Container Database +* [Bind](#bind-to-an-existing-autonomous-container-database) to an existing Autonomous Container Database + +After you create the resource, you can use the operator to perform the following tasks: + +* [Change the display name](#change-the-display-name) of an Autonomous Container Database +* [Restart/Terminate](#restartterminate) an Autonomous Container Database +* [Delete the resource](#delete-the-resource) from the cluster + +## Provision an Autonomous Container Database + +Follow the steps to provision an Autonomous Database that will map objects in your cluster. + +1. Get the `Compartment OCID`. + + Login Cloud Console and click `Compartment`. + + ![compartment-1](/images/adb/compartment-1.png) + + Click on the compartment name where you want to create your database, and **copy** the `OCID` of the compartment. + + ![compartment-2](/images/adb/compartment-2.png) + +2. Get the `AutonomousExadataVMCluster OCID`. + + Login Cloud Console. Go to `Autonomous Database`, and click the `Autonomous Exadata VM Cluster` under the Dedicated Infrastructure. + + ![aei-1](/images/adb/adb-id-1.png) + + Click on the name of the Autonomous Exadata VM Cluster, and copy the `OCID`. + + ![aei-2](/images/adb/aei-id-1.png) + + ![aei-3](/images/adb/aei-id-2.png) + +3. Add the following fields to the AutonomousContainerDatabase resource definition. An example `.yaml` file is available here: [`config/samples/acd/autonomouscontainerdatabase_create.yaml`](./../../config/samples/acd/autonomouscontainerdatabase_create.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.compartmentOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Container Database. 
| Yes | + | `spec.autonomousExadataVMClusterOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Exadata VM Cluster. | Yes | + | `spec.displayName` | string | The user-friendly name for the Autonomous Container Database. The name does not have to be unique. | Yes | + | `spec.patchModel` | string | The Database Patch model preference. The following values are valid: RELEASE_UPDATES and RELEASE_UPDATE_REVISIONS. Currently, the Release Update Revision maintenance type is not a selectable option. | No | + | `spec.freeformTags` | dictionary | Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tag](https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).

Example:
`freeformTags:`
    `key1: value1`
    `key2: value2`| No | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./../adb/ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./../adb/ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | + | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousContainerDatabase + metadata: + name: autonomouscontainerdatabase-sample + spec: + compartmentOCID: ocid1.compartment... OR ocid1.tenancy... + autonomousExadataVMClusterOCID: ocid1.autonomousexainfrastructure... + displayName: newACD + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +4. Apply the yaml: + + ```sh + kubectl apply -f config/samples/acd/autonomouscontainerdatabase_create.yaml + autonomouscontainerdatabase.database.oracle.com/autonomouscontainerdatabase-sample created + ``` + +## Bind to an existing Autonomous Container Database + +Other than provisioning a container database, you can bind to an existing Autonomous Container Database in your cluster. + +1. Clean up the resource you created in the earlier provision operation: + + ```sh + kubectl delete adb/autonomouscontainerdatabase-sample + autonomouscontainerdatabase.database.oracle.com/autonomouscontainerdatabase-sample deleted + ``` + +2. Copy the `Autonomous Container Database OCID` from Cloud Console. + + ![acd-id-1](/images/adb/adb-id-1.png) + + ![acd-id-2](/images/adb/acd-id-1.png) + + ![acd-id-3](/images/adb/acd-id-2.png) + +3. Add the following fields to the AutonomousContainerDatabase resource definition. 
An example `.yaml` file is available here: [`config/samples/acd/autonomouscontainerdatabase_bind.yaml`](./../../config/samples/acd/autonomouscontainerdatabase_bind.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.autonomousContainerDatabaseOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Container Database you want to bind (create a reference) in your cluster. | Yes | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | + | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousContainerDatabase + metadata: + name: autonomouscontainerdatabase-sample + spec: + autonomousContainerDatabaseOCID: ocid1.autonomouscontainerdatabase... + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +4. Apply the yaml. + + ```sh + kubectl apply -f config/samples/acd/autonomouscontainerdatabase_bind.yaml + autonomouscontainerdatabase.database.oracle.com/autonomouscontainerdatabase-sample created + ``` + +## Change the display name + +> Note: this operation requires an `AutonomousContainerDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been completed, and the operator is authorized with API Key Authentication. + +You can change the display name of the database by modifying the value of the `displayName`, as follows: + +1. 
An example YAML file is available here: [config/samples/acd/autonomouscontainerdatabase_change_displayname.yaml](./../../config/samples/acd/autonomouscontainerdatabase_change_displayname.yaml) + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousContainerDatabase + metadata: + name: autonomouscontainerdatabase-sample + spec: + compartmentOCID: ocid1.compartment... OR ocid1.tenancy... + displayName: RenamedADB + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + + * `displayNameName`: User-friendly name of the Autonomous Container Database. The name does not have to be unique. + +2. Apply the change using `kubectl`. + + ```sh + kubectl apply -f config/samples/acd/autonomouscontainerdatabase_change_displayname.yaml + autonomouscontainerdatabase.database.oracle.com/autonomouscontainerdatabase-sample configured + ``` + +## Restart/Terminate + +> Note: this operation requires an `AutonomousContainerDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. + +Users can restart/terminate a database using the `action` attribute. The value will be erased after the change is applied. +Here's a list of the values you can set for `action`: + +* `RESTART`: to restart the database +* `TERMINATE`: to terminate the database +* `SYNC`: to sync the local database with the remote one + +1. A sample .yaml file is available here: [config/samples/acd/autonomouscontainerdatabase_restart_terminate.yaml](./../../config/samples/acd/autonomouscontainerdatabase_restart_terminate.yaml) + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousContainerDatabase + metadata: + name: autonomouscontainerdatabase-sample + spec: + autonomousContainerDatabaseOCID: ocid1.autonomouscontainerdatabase... 
+ # Change the action to "TERMINATE" to terminate the database + action: RESTART + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the change to restart the database. + + ```sh + kubectl apply -f config/samples/acd/autonomouscontainerdatabase_restart_terminate.yaml + autonomouscontainerdatabase.database.oracle.com/autonomouscontainerdatabase-sample configured + ``` + +## Delete the resource + +> Note: this operation requires an `AutonomousContainerDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. + +The `hardLink` defines the behavior when the resource is deleted from the cluster. If the `hardLink` is set to true, the Operator terminates the Autonomous Container Database in OCI when the resource is removed; otherwise, the Autonomous Container Database remains unchanged. By default the value is `false` if it is not explicitly specified. + +Follow the steps to delete the resource and terminate the Autonomous Container Database. + +1. Use the example [autonomouscontainerdatabase_delete_resource.yaml](./../../config/samples/acd/autonomouscontainerdatabase_delete_resource.yaml) which sets the attribute `hardLink` to true. + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousContainerDatabase + metadata: + name: autonomouscontainerdatabase-sample + spec: + autonomousContainerDatabaseOCID: ocid1.autonomouscontainerdatabase... + hardLink: true + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml + + ```sh + kubectl apply -f config/samples/acd/autonomouscontainerdatabase_delete_resource.yaml + autonomouscontainerdatabase.database.oracle.com/autonomouscontainerdatabase-sample configured + ``` + +3. 
Delete the resource in your cluster + + ```sh + kubectl delete acd/autonomouscontainerdatabase-sample + autonomouscontainerdatabase.database.oracle.com/autonomouscontainerdatabase-sample deleted + ``` + +Now, you can verify that the Autonomous Container Database is in TERMINATING state. diff --git a/docs/adb/ADB_LONG_TERM_BACKUP.md b/docs/adb/ADB_LONG_TERM_BACKUP.md new file mode 100644 index 00000000..312dac0d --- /dev/null +++ b/docs/adb/ADB_LONG_TERM_BACKUP.md @@ -0,0 +1,49 @@ +# Creating Long-Term Backups of an Oracle Autonomous Database + +To create long-term backups of Autonomous Databases, use this procedure. + +Oracle Cloud Infrastructure (OCI) automatically backs up your Autonomous Databases, and retains these backups for 60 days. You can restore and recover your database to any point-in-time in this retention period. Automatic backups are full backups taken every 60 days, with daily incremental backups. You can also create long-term backups for your database with a retention period ranging from 3 months to 10 years. For more information, see: [Create Long-Term Backups on Autonomous Database](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/backup-long-term.html) and [Backup and Restore Notes](https://docs.oracle.com/en-us/iaas/autonomous-database-serverless/doc/backup-restore-notes.html). + +## Create Long-Term Backup + +To back up an Autonomous Database, complete this procedure. + +1. Add the following fields to the `AutonomousDatabaseBackup` resource definition. An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_backup.yaml`](./../../config/samples/adb/autonomousdatabase_backup.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.displayName` | string | The user-friendly name for the backup. This name does not have to be unique. | Yes | + | `spec.isLongTermBackup` | boolean | Indicates whether the backup is long-term. 
| Yes | + | `spec.retentionPeriodInDays` | string | Retention period, in days, for long-term backups. Minimum retention period is 90 days. | Yes | + | `spec.target.k8sADB.name` | string | The name of custom resource of the target Autonomous Database. Choose either the `spec.target.k8sADB.name` or the `spec.target.ociADB.ocid`, but not both. | Conditional | + | `spec.target.ociADB.ocid` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the target AutonomousDatabase. Choose either the `spec.target.k8sADB.name` or the `spec.target.ociADB.ocid`, but not both. | Conditional | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from this section: [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication). | Conditional | + | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the Kubernetes (K8s) Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabaseBackup + metadata: + name: autonomousdatabasebackup-sample + spec: + target: + k8sADB: + name: autonomousdatabase-sample + # # Uncomment the below block if you use ADB OCID as the input of the target ADB + # ociADB: + # ocid: ocid1.autonomousdatabase... + displayName: autonomousdatabasebackup-sample + isLongTermBackup: true + retentionPeriodInDays: 90 + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. 
Apply the yaml: + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_backup.yaml + autonomousdatabasebackup.database.oracle.com/autonomousdatabasebackup-sample created + ``` diff --git a/docs/adb/ADB_PREREQUISITES.md b/docs/adb/ADB_PREREQUISITES.md new file mode 100644 index 00000000..a730f4fe --- /dev/null +++ b/docs/adb/ADB_PREREQUISITES.md @@ -0,0 +1,150 @@ +# + +## Oracle Autonomous Database (ADB) Prerequisites + +Oracle Database Operator for Kubernetes must have access to OCI services. + +To provide access, choose **one of the following approaches**: + +* The provider uses [API Key authentication](#authorized-with-api-key-authentication) + +* The Kubernetes cluster nodes are [granted with Instance Principal](#authorized-with-instance-principal) + +## Authorized with API Key Authentication + +API keys are supplied by users to authenticate the operator accessing Oracle Cloud Infrastructure (OCI) services. The operator reads the credentials of the OCI user from a ConfigMap and a Secret. If you're using Oracle Container Engine for Kubernetes (OKE), you may alternatively use [Instance Principal](#authorized-with-instance-principal) to avoid the need to configure user credentials or a configuration file. If the operator is deployed in a third-party Kubernetes cluster, then the credentials or a configuration file are needed, since Instance principal authorization applies only to instances that are running in the OCI. + +Oracle recommends using the helper script `set_ocicredentials.sh` in the root directory of the repository; this script will generate a ConfigMap and a Secret with the OCI credentials. By default, the script parses the **DEFAULT** profile in `~/.oci/config`. The default names of the ConfigMap and the Secret are, respectively: `oci-cred` and `oci-privatekey`. 
+ +```sh +./set_ocicredentials.sh run +``` + +You can change the default values as follows: + +```sh +./set_ocicredentials.sh run -path -profile -configmap -secret +``` + +Alternatively, you can create these values manually. The ConfigMap should contain the following items: `tenancy`, `user`, `fingerprint`, `region`, `passphrase`. The Secret should contain an entry named `privatekey`. + +```sh +kubectl create configmap oci-cred \ +--from-literal=tenancy= \ +--from-literal=user= \ +--from-literal=fingerprint= \ +--from-literal=region= \ +--from-literal=passphrase=(*) + +kubectl create secret generic oci-privatekey \ +--from-file=privatekey= +``` + +> Note: passphrase is deprecated. You can ignore that line. + +After creating the ConfigMap and the Secret, use their names as the values of `ociConfigMap` and `ociSecret` attributes in the yaml files for provisioning, binding, and other operations. + +## Authorized with Instance Principal + +Instance principal authorization enables the operator to make API calls from an instance (that is, a node) without requiring the `ociConfigMap`, and `ociSecret` attributes in the `.yaml` file. This approach applies only to instances that are running in the Oracle Cloud Infrastructure (OCI). In addition, this approach grants permissions to the nodes that match the rules, which means that all the pods in the nodes can make the service calls. + +To set up the instance principals, complete the following tasks: + +* [Define dynamic group that includes the nodes in which the operator runs](#define-dynamic-group) +* [Define policies that grant to the dynamic group the required permissions for the operator to its OCI interactions](#define-policies) + +### Define Dynamic Group + +1. Go to the **Dynamic Groups** page, and click **Create Dynamic Group**. + + ![instance-principal-1](/images/adb/instance-principal-1.png) + +2. In the **Matching Rules** section, write rules the to include the OKE nodes in the dynamic group. 
+ + Example 1 : enables **all** the instances, including OKE nodes in the compartment, to be members of the dynamic group. + + ```sh + All {instance.compartment.id = ''} + ``` + + ![instance-principal-2](/images/adb/instance-principal-2.png) + + Example 2 : enables the specific OKE nodes in the compartment, to be members of the dynamic group. + + ```sh + Any {instance.id = '', instance.id = '', instance.id = ''} + ``` + + ![instance-principal-3](/images/adb/instance-principal-3.png) + +3. To apply the rules, click **Create**. + +### Define Policies + +1. Get the `compartment name` where the database resides: + + > Note: You may skip this step if the database is in the root compartment. + + Go to **Autonomous Database** in the Cloud Console. + + ![adb-id-1](/images/adb/adb-id-1.png) + + Copy the name of the compartment in the details page. + + ![instance-principal-4](/images/adb/instance-principal-4.png) + +2. Set up policies for dynamic groups to grant access to its OCI interactions. Use the dynamic group name is from the [Define Dynamic Group](#define-dynamic-group) section, and the compartment name from the previous step: + + Go to **Policies**, and click **Create Policy**. + + ![instance-principal-5](/images/adb/instance-principal-5.png) + + Example 1: enable the dynamic group to manage **all** the resources in a compartment + + ```sh + Allow dynamic-group to manage all-resources in compartment + ``` + + Example 2: enable the dynamic group to manage **all** the resources in your tenancy (root compartment). + + ```sh + Allow dynamic-group to manage all-resources in tenancy + ``` + + Example 3: enable a particular resource access for the dynamic group to manage Oracle Autonomous Database in a given compartment + + ```sh + Allow dynamic-group to manage autonomous-database-family in compartment + ``` + +3. To apply the policy, click Create. + +At this stage, the instances where the operator deploys have been granted sufficient permissions to call OCI services. 
You can now proceed to the installation. + +### Authorized with OKE Workload Identity + +OKE Workload Identity grants the operator pods policy-driven access to OCI resources using OCI Identity and Access Management (IAM). +When using OKE Workload Identity, only the region must be specified in the ConfigMap corresponding to the `ociConfigMap` attribute. The `ociSecret` attribute should not be specified in the `.yaml` file. + +To set up the OKE Workload Identity, you will have to: + +### Configure Cluster Region + +The operator reads the OCI region from a ConfigMap. + +```sh +kubectl create configmap oci-cred \ +--from-literal=region= +``` + +### Define Policies + +1. Get the compartment name where the database resides/will be created. +2. Get the OCID of the OKE Cluster where the Oracle Database Operator is running. +3. Create the following policy in OCI IAM, supplying your compartment name and OKE Cluster OCID: + +``` +Allow any-user to manage all-resources in compartment where all {request.principal.namespace='oracle-database-operator-system',request.principal.type='workload',request.principal.cluster_id='',request.principal.service_account='default'} +``` + +After creating the policy, operator pods will be granted sufficient permissions to call OCI services. You can now proceed to the installation. diff --git a/docs/adb/ADB_RESTORE.md b/docs/adb/ADB_RESTORE.md new file mode 100644 index 00000000..7a80090a --- /dev/null +++ b/docs/adb/ADB_RESTORE.md @@ -0,0 +1,51 @@ +# Restoring an Oracle Autonomous Database Manually + +To restore an Autonomous Database from a backup, use this document. + +You can either use any existing on-demand or automatic backup to restore your database, or you can restore and recover your database to any point in time in the 60-day retention period of your automatic backups. For point-in-time restores, you specify a timestamp. Your Autonomous Database identifies which backup to use for the fastest restore. 
+ +## Restore an Autonomous Database + +To restore an Autonomous Database from a backup, or by using point-in-time restore, complete this procedure. + +1. Add the following fields to the AutonomousDatabaseBackup resource definition. An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_restore.yaml`](./../../config/samples/adb/autonomousdatabase_restore.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.target.k8sADB.name` | string | The name of custom resource of the target Autonomous Database (`AutonomousDatabase`). Choose either the `spec.target.k8sADB.name` or the `spec.target.ociADB.ocid`, but not both. | Conditional | + | `spec.target.ociADB.ocid` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the target `AutonomousDatabase`. Choose either the `spec.target.k8sADB.name` or the `spec.target.ociADB.ocid`, but not both. | Conditional | + | `spec.source.k8sADBBackup.name` | string | The name of custom resource of the `AutonomousDatabaseBackup` that you want to restore from. Choose either the `spec.source.k8sADBBackup.name` or the `spec.source.pointInTime.timestamp`, but not both. | Conditional | + | `spec.source.pointInTime.timestamp` | string | The timestamp to specify the point in time to which you want the database restored. Your Autonomous Database identifies which backup to use for the fastest restore. The timestamp must follow this format: YYYY-MM-DD HH:MM:SS GMT. Choose either the `spec.source.k8sADBBackup.name` or the `spec.source.pointInTime.timestamp`, but not both. | Conditional | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from this section: [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication). 
| Conditional | + | `spec.ociConfig.configMapName` | string | Name of the `ConfigMap` that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the Kubernetes (K8s) Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabaseRestore + metadata: + name: autonomousdatabaserestore-sample + spec: + target: + k8sADB: + name: autonomousdatabase-sample + # # Uncomment the below block if you use ADB OCID as the input of the target ADB + # ociADB: + # ocid: ocid1.autonomousdatabase... + source: + k8sADBBackup: + name: autonomousdatabasebackup-sample + # # Uncomment the following field to perform point-in-time restore + # pointInTime: + # timestamp: 2022-12-23 11:03:13 UTC + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml: + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_restore.yaml + autonomousdatabaserestore.database.oracle.com/autonomousdatabaserestore-sample created + ``` diff --git a/docs/adb/NETWORK_ACCESS_OPTIONS.md b/docs/adb/NETWORK_ACCESS_OPTIONS.md new file mode 100644 index 00000000..e7eb0a56 --- /dev/null +++ b/docs/adb/NETWORK_ACCESS_OPTIONS.md @@ -0,0 +1,268 @@ +# Configuring Network Access for Oracle Autonomous Database + +To configure network access for Oracle Autonomous Database (Autonomous Database), review and complete the procedures in this document. + +Network access for Autonomous Database includes public access, and configuring secure access, either over public networks using access control rules (ACLs), or by using using private endpoints inside a Virtual Cloud Network (VCN) in your tenancy. This document also describes procedures to configure the Transport Layer Security (TLS) connections, with the option either to require mutual TLS only, or to allow both one-way TLS and mutual TLS. 
+ +For more information about these options, see: [Configuring Network Access with Access Control Rules (ACLs) and Private Endpoints ](https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/autonomous-network-access.html#GUID-D2D468C3-CA2D-411E-92BC-E122F795A413). + +## Supported Features +Review the following options available to you with Autonomous Database. + +* [Configuring Network Access with Allowing Secure Access from Anywhere](#configuring-network-access-with-allowing-secure-access-from-anywhere) on shared Exadata infrastructure +* [Configuring Network Access with Access Control Rules (ACLs)](#configuring-network-access-with-access-control-rules-acls) on shared Exadata infrastructure +* [Configure Network Access with Private Endpoint Access Only](#configure-network-access-with-private-endpoint-access-only) on shared Exadata infrastructure +* [Allowing TLS or Require Only Mutual TLS (mTLS) Authentication](#allowing-tls-or-require-only-mutual-tls-mtls-authentication) on shared Exadata infrastructure +* [Autonomous Database with access control list enabled](#autonomous-database-with-access-control-list-enabled-on-dedicated-exadata-infrastructure) on dedicated Exadata infrastructure + +## Configuring Network Access with Allowing Secure Access from Anywhere + +Before changing the Network Access to Allowing Secure Access from Anywhere, ensure that your network security protocol requires only mTLS (Mutual TLS) authentication. For more details, see: [Allow both TLS and mutual TLS (mTLS) authentication](#allow-both-tls-and-mutual-tls-mtls-authentication). If mTLS enforcement is already enabled on your Autonomous Database, then you can skip this step. + +To specify that Autonomous Database can be connected from any location with a valid credential, complete one of the following procedures, based on your network access configuration. 
+ +### Option 1 - Change the Network Access from "Secure Access from Allowed IPs and VCNs Only" to "Allowing Secure Access from Anywhere" +1. Add the following parameters to the specification. An example file is available here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): + + | Attribute | Type | Description | + |----|----|----| + | `whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for Autonomous Databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, then use an array with a single empty string entry. | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + whitelistedIps: + - + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml: + + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +### Option 2 - Change the Network Access from "Private Endpoint Access Only" to "Allowing Secure Access from Anywhere" + +1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): + + | Attribute | Type | Description | + |----|----|----| + | `privateEndpointLabel` | string | The hostname prefix for the resource. | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + privateEndpointLabel: "" + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml: + + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +## Configuring Network Access with Access Control Rules (ACLs) + +To configure Network Access with ACLs, complete this procedure. + + +1. Add the following parameters to the specification. 
An example file is available here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): + + | Attribute | Type | Description | + |----|----|----| + | `whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for Autonomous Databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry. | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + networkAccess: + # Restrict access by defining access control rules in an Access Control List (ACL). + whitelistedIps: + - 1.1.1.1 + - 1.1.0.0/16 + - ocid1.vcn... + - ocid1.vcn...;1.1.1.1 + - ocid1.vcn...;1.1.0.0/16 + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml: + + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + +## Configure Network Access with Private Endpoint Access Only + +To change the Network Access to Private Endpoint Access Only, complete this procedure + +1. Visit [Overview of VCNs and Subnets](https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingVCNs_topic-Overview_of_VCNs_and_Subnets.htm#console) and [Network Security Groups](https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/networksecuritygroups.htm#working) to see how to create VCNs, subnets, and network security groups (NSGs) if you haven't already created them. The subnet and the NSG must be in the same VCN. + +2. Copy and paste the OCIDs of the subnet and NSG to the corresponding parameters. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): + + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `subnetId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.

**Subnet Restrictions:**
- For bare metal DB systems and for single-node virtual machine DB systems, do not use a subnet that overlaps with 192.168.16.16/28.
- For Exadata and virtual machine 2-node Oracle RAC systems, do not use a subnet that overlaps with 192.168.128.0/20.
- For Autonomous Database, setting `subnetID` disables public secure access to the database.
These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet. | Yes | + | `nsgIds` | string[] | The list of [OCIDs](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) for the network security groups (NSGs) to which this resource belongs. Setting `nsgIds` to an empty list removes all resources from all NSGs. For more information about NSGs, see [Security Rule](https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).

**NsgIds restrictions:**
- A network security group (NSG) is optional for Autonomous Databases with private access. The nsgIds list can be empty. | No | + | `privateEndpointLabel` | string | The resource's private endpoint label.
- Setting the endpoint label to a non-empty string creates a private endpoint database.
- Resetting the endpoint label to an empty string, after the creation of the private endpoint database, changes the private endpoint database to a public endpoint database.
- Setting the endpoint label to a non-empty string value, updates to a new private endpoint database, when the database is disabled and re-enabled.
This setting cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMTLSConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + subnetId: ocid1.subnet... + nsgIds: + - ocid1.networksecuritygroup... + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +## Allowing TLS or Require Only Mutual TLS (mTLS) Authentication +You can choose either to require mTLS authentication and disallow TLS authentication, or allow both mTLS and TLS authentication. + +### Require mutual TLS (mTLS) authentication and Disallow TLS Authentication + +To configure your Autonomous Database instance to require mTLS connections and disallow TLS connections, complete this procedure. + +1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_mtls.yaml`](./../../config/samples/adb/autonomousdatabase_update_mtls.yaml): + + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `isMtlsConnectionRequired` | boolean| Indicates whether the Autonomous Database requires mTLS connections. | Yes | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + isMtlsConnectionRequired: true + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. 
Apply the yaml: + + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_mtls.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +### Allow both TLS and mutual TLS (mTLS) authentication + +If your Autonomous Database instance is configured to allow only mTLS connections, then you can reconfigure the instance to permit both mTLS and TLS connections. When you reconfigure the instance to permit both mTLS and TLS, you can use both authentication types at the same time, so that connections are no longer restricted to require mTLS authentication. + +This option only applies to Autonomous Databases on shared Exadata infrastructure. You can permit TLS connections when network access type is configured by using one of the following options: + +* **Access Control Rules (ACLs)**: with ACLs defined. +* **Private Endpoint Access Only**: with a private endpoint defined. + +Complete this procedure to allow both TLS and mTLS authentication. + +1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_mtls.yaml`](./../../config/samples/adb/autonomousdatabase_update_mtls.yaml): + + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `isMtlsConnectionRequired` | boolean| Indicates whether the Autonomous Database requires mTLS connections. | Yes | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + isMtlsConnectionRequired: false + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. 
Apply the yaml: + + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_mtls.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +## Autonomous Database with access control list enabled on dedicated Exadata infrastructure + +To configure the network access of Autonomous Database with access control list (ACL) on dedicated Exadata infrastructure, complete this procedure. + +1. Add the following parameters to the specification. An example file is available here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): + + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `isAccessControlEnabled` | boolean | Indicates if the database-level access control is enabled.

If disabled, then database access is defined by the network security rules.

If enabled, then database access is restricted to the IP addresses defined by the rules specified with the `accessControlList` property. While specifying `accessControlList` rules is optional, if database-level access control is enabled, and no rules are specified, then the database will become inaccessible. The rules can be added later by using the `UpdateAutonomousDatabase` API operation, or by using the edit option in the console.

When creating a database clone, you should specify the access control setting that you want the clone database to use. By default, database-level access control is disabled for the clone.
This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform. | Yes | + | `accessControlList` | []string | The client IP access control list (ACL). This feature is available for Autonomous Databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, the access control list is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry. | Yes | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + isAccessControlEnabled: true + accessControlList: + - 1.1.1.1 + - 1.1.0.0/16 + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml: + + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured diff --git a/docs/adb/README.md b/docs/adb/README.md new file mode 100644 index 00000000..e8164697 --- /dev/null +++ b/docs/adb/README.md @@ -0,0 +1,549 @@ +# Managing Oracle Autonomous Databases with Oracle Database Operator for Kubernetes + +Before you use the Oracle Database Operator for Kubernetes (the operator), ensure that your system meets all of the Oracle Autonomous Database (ADB) Prerequisites [ADB_PREREQUISITES](./ADB_PREREQUISITES.md). + +As indicated in the prerequisites (see above), to interact with OCI services, either the cluster must be authorized using Principal Instance, or the cluster must be authorized using the API Key Authentication by specifying the configMap and the secret under the `ociConfig` field. + +## Required Permissions + +The operator must be given the required type of access in a policy written by an administrator to manage the Autonomous Databases. For examples of Autonomous Database policies, see: [Let database and fleet admins manage Autonomous Databases](https://docs.oracle.com/en-us/iaas/Content/Identity/Concepts/commonpolicies.htm#db-admins-manage-adb) + +Permissions to view the work requests are also required, so that the operator can update the resources when the work is done. 
For example work request policies, see: [Viewing Work Requests](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengviewingworkrequests.htm#contengviewingworkrequests) + +## Supported Features + +After the operator is deployed, choose one of the following operations to create an `AutonomousDatabase` custom resource for Oracle Autonomous Database in your cluster. + +* [Provision](#provision-an-autonomous-database) an Autonomous Database +* [Bind](#bind-to-an-existing-autonomous-database) to an existing Autonomous Database + +After you create the resource, you can use the operator to perform the following tasks: + +* [Scale the OCPU core count or storage](#scale-the-ocpu-core-count-or-storage) an Autonomous Database +* [Rename](#rename) an Autonomous Database +* [Manage ADMIN database user password](#manage-admin-password) of an Autonomous Database +* [Download instance credentials (wallets)](#download-wallets) of an Autonomous Database +* [Stop/Start/Terminate](#stopstartterminate) an Autonomous Database +* [Delete the resource](#delete-the-resource) from the cluster +* [Clone](#clone-an-existing-autonomous-database) an existing Autonomous Database + +To debug the Oracle Autonomous Databases with Oracle Database Operator, see [Debugging and troubleshooting](#debugging-and-troubleshooting) + +## Provision an Autonomous Database + +To provision an Autonomous Database that will map objects in your cluster, complete the following steps: + +1. Obtain the `Compartment OCID`. + + Log in to the Cloud Console and click `Compartment`. + + ![compartment-1](/images/adb/compartment-1.png) + + Click on the compartment name where you want to create your database, and **copy** the `OCID` of the compartment. + + ![compartment-2](/images/adb/compartment-2.png) + +2. To create an Autonomous Database on Dedicated Exadata Infrastructure (ADB-D), the OCID of the Oracle Autonomous Container Database is required. 
+ + You can skip this step if you want to create a Autonomous Database on Shared Exadata Infrastructure (ADB-S). + + Go to the Cloud Console and click `Autonomous Database`. + + ![acd-id-1](/images/adb/adb-id-1.png) + + Under `Dedicated Infrastructure`, click `Autonomous Container Database`. + + ![acd-id-2](/images/adb/acd-id-1.png) + + Click on the name of Autonomous Container Database and copy the `Autonomous Container Database OCID` from the Cloud Console. + + ![acd-id-3](/images/adb/acd-id-2.png) + +3. Create a Kubernetes Secret to hold the password of the ADMIN user. **The key and the name of the secret must be the same.** + + You can create this secret by using a command similar to the following example: + + ```sh + kubectl create secret generic admin-password --from-literal=admin-password='password_here' + ``` + +4. Add the following fields to the Autonomous Database resource definition. An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_create.yaml`](./../../config/samples/adb/autonomousdatabase_create.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.details.compartmentId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Database. | Yes | + | `spec.details.dbName` | string | The database name. The name must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. | Yes | + | `spec.details.displayName` | string | The user-friendly name for the Autonomous Database. The name does not have to be unique. | Yes | + | `spec.details.dbWorkload` | string | The Autonomous Database workload type. The following values are valid:
`OLTP` - indicates an Autonomous Transaction Processing database
`DW` - indicates an Autonomous Data Warehouse database
`AJD` - indicates an Autonomous JSON Database
`APEX` - indicates an Autonomous Database with the Oracle APEX Application Development workload type.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.licenseModel` | string | The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-premises Oracle software licenses to equivalent, highly automated Oracle services in the cloud.License Included allows you to subscribe to new Oracle Database software licenses and the Oracle Database service. Note that when provisioning an [Autonomous Database on dedicated Exadata infrastructure](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html), this attribute must be null. It is already set at the Autonomous Exadata Infrastructure level. When provisioning an [Autonomous Database Serverless ](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) database, if a value is not specified, the system defaults the value to `BRING_YOUR_OWN_LICENSE`. Bring your own license (BYOL) also allows you to select the DB edition using the optional parameter.
This cannot be updated in parallel with any of the following: cpuCoreCount, computeCount, dataStorageSizeInTBs, adminPassword, isMtlsConnectionRequired, dbWorkload, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.dbVersion` | string | A valid Oracle Database version for Autonomous Database. | No | + | `spec.details.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. For Autonomous Databases on dedicated Exadata infrastructure, the maximum storage value is determined by the infrastructure shape. See Characteristics of [Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details. A full Exadata service is allocated when the Autonomous Database size is set to the upper limit (384 TB). | No | + | `spec.details.cpuCoreCount` | int | The number of CPU cores to be made available to the database. For Autonomous Databases on dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `ocpuCount` parameter. | Conditional | + | `spec.details.computeModel` | string | The compute model of the Autonomous Database. This is required if using the `computeCount` parameter. If using `cpuCoreCount` then it is an error to specify `computeModel` to a non-null value. ECPU compute model is the recommended model and OCPU compute model is legacy. | Conditional | + | `spec.details.computeCount` | float32 | The compute amount (CPUs) available to the database. Minimum and maximum values depend on the compute model and whether the database is an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
For an Autonomous Database Serverless instance, the 'ECPU' compute model requires a minimum value of one, for databases in the elastic resource pool and minimum value of two, otherwise. Required when using the `computeModel` parameter. When using `cpuCoreCount` parameter, it is an error to specify computeCount to a non-null value. Providing `computeModel` and `computeCount` is the preferred method for both OCPU and ECPU. | Conditional | + | `spec.details.ocpuCount` | float32 | The number of OCPU cores to be made available to the database.
The following points apply:
- For Autonomous Databases on Dedicated Exadata infrastructure, to provision less than 1 core, enter a fractional value in an increment of 0.1. For example, you can provision 0.3 or 0.4 cores, but not 0.35 cores. (Note that fractional OCPU values are not supported for Autonomous Database Serverless instances.)
- To provision 1 or more cores, you must enter an integer between 1 and the maximum number of cores available for the infrastructure shape. For example, you can provision 2 cores or 3 cores, but not 2.5 cores. This applies to an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
- For Autonomous Database Serverless instances, this parameter is not used.
For Autonomous Databases on Dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `cpuCoreCount` parameter. | Conditional | + | `spec.details.adminPassword` | dictionary | The password for the ADMIN user. The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing.

Either `k8sSecret.name` or `ociSecret.id` must be provided. If both `k8sSecret.name` and `ociSecret.id` appear, the Operator reads the password from the K8s secret that `k8sSecret.name` refers to. | Yes | + | `spec.details.adminPassword.k8sSecret.name` | string | The **name** of the K8s Secret where you want to hold the password for the ADMIN user. | Conditional | + |`spec.details.adminPassword.ociSecret.id` | string | The **[OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)** of the [OCI Secret](https://docs.oracle.com/en-us/iaas/Content/KeyManagement/Tasks/managingsecrets.htm) where you want to hold the password for the ADMIN user. | Conditional | + | `spec.details.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. | Yes | + | `spec.details.isAutoScalingEnabled` | boolean | Indicates if auto scaling is enabled for the Autonomous Database OCPU core count. The default value is `FALSE` | No | + | `spec.details.isDedicated` | boolean | True if the database is on dedicated [Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm). `spec.details.autonomousContainerDatabase.k8sACD.name` or `spec.details.autonomousContainerDatabase.ociACD.id` has to be provided if the value is true. | No | + | `spec.details.isFreeTier` | boolean | Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of memory. For Always Free databases, memory and CPU cannot be scaled.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, or dbName. | No | + | `spec.details.isAccessControlEnabled` | boolean | Indicates if the database-level access control is enabled.
If disabled, database access is defined by the network security rules.
If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While specifying `whitelistedIps` rules is optional, if database-level access control is enabled and no rules are specified, the database will become inaccessible.
When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be disabled for the clone.
This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform. For Autonomous Database Serverless instances, `whitelistedIps` is used. | No | + | `spec.details.whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for [Autonomous Database Serverless](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.
If `arePrimaryWhitelistedIpsUsed` is 'TRUE' then Autonomous Database uses this primary's IP access control list (ACL) for the disaster recovery peer called `standbywhitelistedips`.
For Autonomous Database Serverless, this is an array of CIDR (classless inter-domain routing) notations for a subnet or VCN OCID (virtual cloud network Oracle Cloud ID).
Multiple IPs and VCN OCIDs should be separate strings separated by commas. However, if other configurations require multiple pieces of information, then each piece is connected with a semicolon (;) as a delimiter.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`
For Exadata Cloud@Customer, this is an array of IP addresses or CIDR notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`
For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, isMtlsConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.subnetId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.
**Subnet Restrictions:**
- For Autonomous Database, setting this will disable public secure access to the database.
These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet. | No | + | `spec.details.nsgIds` | []string | The list of [OCIDs](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) for the network security groups (NSGs) to which this resource belongs. Setting this to an empty list removes all resources from all NSGs. For more information about NSGs, see [Security Rules](https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- A network security group (NSG) is optional for Autonomous Databases with private access. The nsgIds list can be empty. | No | + | `spec.details.privateEndpointLabel` | string | The resource's private endpoint label.
- Setting the endpoint label to a non-empty string creates a private endpoint database.
- Resetting the endpoint label to an empty string, after the creation of the private endpoint database, changes the private endpoint database to a public endpoint database.
- Setting the endpoint label to a non-empty string value updates the database to a new private endpoint database when the database is disabled and re-enabled.
This setting cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMTLSConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.isMtlsConnectionRequired` | boolean | Specifies if the Autonomous Database requires mTLS connections. | No | + | `spec.details.autonomousContainerDatabase.k8sACD.name` | string | The **name** of the K8s Autonomous Container Database resource | No | + | `spec.details.autonomousContainerDatabase.ociACD.id` | string | The Autonomous Container Database [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm). | No | + | `spec.details.freeformTags` | dictionary | Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tag](https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).

Example:
`freeformTags:`
    `key1: value1`
    `key2: value2`| No | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | + | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Create + details: + compartmentId: ocid1.compartment... + dbName: NewADB + displayName: NewADB + cpuCoreCount: 1 + adminPassword: + k8sSecret: + name: admin-password # use the name of the secret from step 2 + dataStorageSizeInTBs: 1 + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +5. Choose the type of network access (optional): + + By default, the network access type is set to PUBLIC, which allows secure connections from anywhere. Uncomment the code block if you want configure the network access. For more information, see: [Configuring Network Access of Autonomous Database](./NETWORK_ACCESS_OPTIONS.md) + +6. Apply the YAML: + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_create.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample created + ``` + +## Bind to an existing Autonomous Database + +Other than provisioning a database, you can create the custom resource using an existing Autonomous Database. + +The operator also generates the `AutonomousBackup` custom resources if a database already has backups. 
The operator syncs the `AutonomousBackups` in every reconciliation loop by getting the list of OCIDs of the AutonomousBackups from OCI, and then creates the `AutonomousDatabaseBackup` object automatically if it cannot find a resource that has the same `AutonomousBackupOCID` in the cluster. + +1. Clean up the resource you created in the earlier provision operation: + + ```sh + kubectl delete adb/autonomousdatabase-sample + autonomousdatabase.database.oracle.com/autonomousdatabase-sample deleted + ``` + +2. Copy the `Autonomous Database OCID` from Cloud Console. + + ![adb-id-1](/images/adb/adb-id-1.png) + + ![adb-id-2](/images/adb/adb-id-2.png) + +3. Add the following fields to the AutonomousDatabase resource definition. An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_bind.yaml`](./../../config/samples/adb/autonomousdatabase_bind.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.details.id` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Database that you want to bind (create a reference) in your cluster. | Yes | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | + | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Sync + details: + id: ocid1.autonomousdatabase... 
+ ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +4. Apply the yaml. + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_bind.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample created + ``` + +## Scale the OCPU core count or storage + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. To use this example, either the provision operation or the bind operation must be completed, and the operator must be authorized with API Key Authentication. + +You can scale up or scale down the Oracle Autonomous Database OCPU core count or storage by updating the `cpuCoreCount` and `dataStorageSizeInTBs` parameters. The `isAutoScalingEnabled` indicates whether auto scaling is enabled. In this example, the CPU count and storage size (TB) are scaled up to 2 and the auto-scaling is turned off by updating the `autonomousdatabase-sample` custom resource. + +1. An example YAML file is available here: [config/samples/adb/autonomousdatabase_scale.yaml](./../../config/samples/adb/autonomousdatabase_scale.yaml) + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + id: ocid1.autonomousdatabase... + cpuCoreCount: 2 + dataStorageSizeInTBs: 2 + isAutoScalingEnabled: false + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the change using `kubectl`. + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_scale.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +## Rename + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been completed, and the operator is authorized with API Key Authentication. 
+ +You can rename the database by changing the values of the `dbName` and `displayName`, as follows: + +1. An example YAML file is available here: [config/samples/adb/autonomousdatabase_rename.yaml](./../../config/samples/adb/autonomousdatabase_rename.yaml) + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + id: ocid1.autonomousdatabase... + dbName: RenamedADB + displayName: RenamedADB + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + + * `dbName`: The database name. It must begin with an alphabetic character. It can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. + * `displayNameName`: User-friendly name of the database. The name does not have to be unique. + +2. Apply the change using `kubectl`. + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_rename.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +## Manage Admin Password + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been completed, and the operator is authorized with API Key Authentication. + +1. Create a Kubernetes Secret to hold the new password of the ADMIN user. + + As an example, you can create this secret with the following command: * + + ```sh + kubectl create secret generic new-adb-admin-password --from-literal=new-adb-admin-password='password_here' + ``` + + \* The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing. + +2. 
Update the example [config/samples/adb/autonomousdatabase_update_admin_password.yaml](./../../config/samples/adb/autonomousdatabase_update_admin_password.yaml) + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + id: ocid1.autonomousdatabase... + adminPassword: + k8sSecret: + name: new-admin-password + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + + * `adminPassword.k8sSecret.name`: the **name** of the secret that you created in **step1**. + +3. Apply the YAML. + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_update_admin_password.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +## Download Wallets + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. + +A client Wallet is required to connect to a shared Oracle Autonomous Database. User has to provide a wallet password to download the Wallet. In the following example, the Operator will read the password from a Kubernetes Secret to download the Wallet. After that, the downloaded Wallet will be unzipped and stored as byte values in a new Kubernetes Secret `instance-wallet`. + +1. Create a Kubernetes Secret to hold the wallet password. + + As an example, you can create this secret with the following command: * + + ```sh + kubectl create secret generic instance-wallet-password --from-literal=instance-wallet-password='password_here' + ``` + + \* The password must be at least 8 characters long and must include at least 1 letter and either 1 numeric character or 1 special character. + +2. 
Update the example [config/samples/adb/autonomousdatabase_wallet.yaml](./../../config/samples/adb/autonomousdatabase_wallet.yaml) + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + id: ocid1.autonomousdatabase... + wallet: + name: instance-wallet + password: + k8sSecret: + name: instance-wallet-password + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + + * `wallet.name`: the name of the new Secret where you want the downloaded Wallet to be stored. + * `wallet.password.k8sSecret.name`: the **name** of the secret you created in **step1**. + +3. Apply the YAML + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_wallet.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +You should see a new Secret `instance-wallet` in your cluster: + +```sh +$ kubectl get secrets +NAME TYPE DATA AGE +oci-privatekey Opaque 1 2d12h +instance-wallet-password Opaque 1 2d12h +instance-wallet Opaque 8 2d12h +``` + +To use the secret in a deployment, refer to [Using Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets) for the examples. + +## Stop/Start/Terminate + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. + +To start, stop, or terminate a database, use the `action` attribute. +Here's a list of the values you can set for `action`: + +* `Start`: to start the database +* `Stop`: to stop the database +* `Terminate`: to terminate the database + +1. 
An example .yaml file is available here: [config/samples/adb/autonomousdatabase_stop_start_terminate.yaml](./../../config/samples/adb/autonomousdatabase_stop_start_terminate.yaml) + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Stop + details: + id: ocid1.autonomousdatabase... + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the change to stop the database. + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_stop_start_terminate.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +## Delete the resource + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. + +The `hardLink` defines the behavior when the resource is deleted from the cluster. If the `hardLink` is set to true, the Operator terminates the Autonomous Database in OCI when the resource is removed; otherwise, the database remains unchanged. By default the value is `false` if it is not explicitly specified. + +To delete the resource and terminate the Autonomous Database, complete these steps: + +1. Use the example [autonomousdatabase_delete_resource.yaml](./../../config/samples/adb/autonomousdatabase_delete_resource.yaml), which sets the attribute `hardLink` to true. + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + id: ocid1.autonomousdatabase... + hardLink: true + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. 
Apply the yaml + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_delete_resource.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +3. Delete the resource in your cluster + + ```sh + kubectl delete adb/autonomousdatabase-sample + autonomousdatabase.database.oracle.com/autonomousdatabase-sample deleted + ``` + +Now, you can verify that the database is in TERMINATING state on the Cloud Console. + +## Clone an existing Autonomous Database + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. + +To clone an existing Autonomous Database, complete these steps: + +1. Add the following fields to the AutonomousDatabase resource definition. An example YAML file is available here: [config/samples/adb/autonomousdatabase_clone.yaml](./../../config/samples/adb/autonomousdatabase_clone.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.details.id` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database that you will clone to create a new Autonomous Database. | Yes | + | `spec.clone.cloneType` | string | The Autonomous Database clone type. Accepted values are: `FULL` and `METADATA`. | No | + | `spec.clone.compartmentId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Database. | Yes | + | `spec.clone.dbName` | string | The database name. The name must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. | Yes | + | `spec.clone.displayName` | string | The user-friendly name for the Autonomous Database. 
The name does not have to be unique. | Yes | + | `spec.clone.dbWorkload` | string | The Autonomous Database workload type. The following values are valid:
`OLTP` - indicates an Autonomous Transaction Processing database
`DW` - indicates an Autonomous Data Warehouse database
`AJD` - indicates an Autonomous JSON Database
`APEX` - indicates an Autonomous Database with the Oracle APEX Application Development workload type.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.licenseModel` | string | The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-premises Oracle software licenses to equivalent, highly automated Oracle services in the cloud.License Included allows you to subscribe to new Oracle Database software licenses and the Oracle Database service. Note that when provisioning an [Autonomous Database on dedicated Exadata infrastructure](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html), this attribute must be null. It is already set at the Autonomous Exadata Infrastructure level. When provisioning an [Autonomous Database Serverless ](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) database, if a value is not specified, the system defaults the value to `BRING_YOUR_OWN_LICENSE`. Bring your own license (BYOL) also allows you to select the DB edition using the optional parameter.
This cannot be updated in parallel with any of the following: cpuCoreCount, computeCount, dataStorageSizeInTBs, adminPassword, isMtlsConnectionRequired, dbWorkload, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.dbVersion` | string | A valid Oracle Database version for Autonomous Database. | No | + | `spec.clone.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. For Autonomous Databases on dedicated Exadata infrastructure, the maximum storage value is determined by the infrastructure shape. See Characteristics of [Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details. A full Exadata service is allocated when the Autonomous Database size is set to the upper limit (384 TB). | No | + | `spec.clone.cpuCoreCount` | int | The number of CPU cores to be made available to the database. For Autonomous Databases on dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `ocpuCount` parameter. | Conditional | + | `spec.clone.computeModel` | string | The compute model of the Autonomous Database. This is required if using the `computeCount` parameter. If using `cpuCoreCount` then it is an error to specify `computeModel` to a non-null value. ECPU compute model is the recommended model and OCPU compute model is legacy. | Conditional | + | `spec.clone.computeCount` | float32 | The compute amount (CPUs) available to the database. Minimum and maximum values depend on the compute model and whether the database is an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
For an Autonomous Database Serverless instance, the 'ECPU' compute model requires a minimum value of one, for databases in the elastic resource pool and minimum value of two, otherwise. Required when using the `computeModel` parameter. When using `cpuCoreCount` parameter, it is an error to specify computeCount to a non-null value. Providing `computeModel` and `computeCount` is the preferred method for both OCPU and ECPU. | Conditional | + | `spec.clone.ocpuCount` | float32 | The number of OCPU cores to be made available to the database.
The following points apply:
- For Autonomous Databases on Dedicated Exadata infrastructure, to provision less than 1 core, enter a fractional value in an increment of 0.1. For example, you can provision 0.3 or 0.4 cores, but not 0.35 cores. (Note that fractional OCPU values are not supported for Autonomous Database Serverless instances.)
- To provision 1 or more cores, you must enter an integer between 1 and the maximum number of cores available for the infrastructure shape. For example, you can provision 2 cores or 3 cores, but not 2.5 cores. This applies to an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
- For Autonomous Database Serverless instances, this parameter is not used.
For Autonomous Databases on Dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `cpuCoreCount` parameter. | Conditional | + | `spec.clone.adminPassword` | dictionary | The password for the ADMIN user. The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing.

Either `k8sSecret.name` or `ociSecret.id` must be provided. If both `k8sSecret.name` and `ociSecret.id` appear, the Operator reads the password from the K8s secret that `k8sSecret.name` refers to. | Yes | + | `spec.clone.adminPassword.k8sSecret.name` | string | The **name** of the K8s Secret where you want to hold the password for the ADMIN user. | Conditional | + |`spec.clone.adminPassword.ociSecret.id` | string | The **[OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)** of the [OCI Secret](https://docs.oracle.com/en-us/iaas/Content/KeyManagement/Tasks/managingsecrets.htm) where you want to hold the password for the ADMIN user. | Conditional | + | `spec.clone.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. | Yes | + | `spec.clone.isAutoScalingEnabled` | boolean | Indicates if auto scaling is enabled for the Autonomous Database OCPU core count. The default value is `FALSE` | No | + | `spec.clone.isDedicated` | boolean | True if the database is on dedicated [Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm). `spec.clone.autonomousContainerDatabase.k8sACD.name` or `spec.clone.autonomousContainerDatabase.ociACD.id` has to be provided if the value is true. | No | + | `spec.clone.isFreeTier` | boolean | Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of memory. For Always Free databases, memory and CPU cannot be scaled.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, or dbName. | No | + | `spec.clone.isAccessControlEnabled` | boolean | Indicates if the database-level access control is enabled.
If disabled, database access is defined by the network security rules.
If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While specifying `whitelistedIps` rules is optional, if database-level access control is enabled and no rules are specified, the database will become inaccessible.
When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be disabled for the clone.
This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform. For Autonomous Database Serverless instances, `whitelistedIps` is used. | No | + | `spec.clone.whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for [Autonomous Database Serverless](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.
If `arePrimaryWhitelistedIpsUsed` is 'TRUE' then Autonomous Database uses this primary's IP access control list (ACL) for the disaster recovery peer called `standbywhitelistedips`.
For Autonomous Database Serverless, this is an array of CIDR (classless inter-domain routing) notations for a subnet or VCN OCID (virtual cloud network Oracle Cloud ID).
Multiple IPs and VCN OCIDs should be provided as separate strings separated by commas; for other configurations that need multiple pieces of information, each piece is connected with a semicolon (;) as a delimiter.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`
For Exadata Cloud@Customer, this is an array of IP addresses or CIDR notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`
For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, isMtlsConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.subnetId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.
**Subnet Restrictions:**
- For Autonomous Database, setting this will disable public secure access to the database.
These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet. | No | + | `spec.clone.nsgIds` | []string | The list of [OCIDs](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) for the network security groups (NSGs) to which this resource belongs. Setting this to an empty list removes all resources from all NSGs. For more information about NSGs, see [Security Rules](https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- A network security group (NSG) is optional for Autonomous Databases with private access. The nsgIds list can be empty. | No | + | `spec.clone.privateEndpointLabel` | string | The resource's private endpoint label.
- Setting the endpoint label to a non-empty string creates a private endpoint database.
- Resetting the endpoint label to an empty string, after the creation of the private endpoint database, changes the private endpoint database to a public endpoint database.
- Setting the endpoint label to a non-empty string value updates the database to a new private endpoint database when the database is disabled and re-enabled.
This setting cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMTLSConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.isMtlsConnectionRequired` | boolean | Specifies if the Autonomous Database requires mTLS connections. | No | + | `spec.clone.autonomousContainerDatabase.k8sACD.name` | string | The **name** of the K8s Autonomous Container Database resource | No | + | `spec.clone.autonomousContainerDatabase.ociACD.id` | string | The Autonomous Container Database [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm). | No | + | `spec.clone.freeformTags` | dictionary | Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tag](https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).

Example:
`freeformTags:`
    `key1: value1`
    `key2: value2`| No | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | + | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Clone + details: + id: ocid1.autonomousdatabase... + clone: + compartmentId: ocid1.compartment... OR ocid1.tenancy... + dbName: ClonedADB + displayName: ClonedADB + cpuCoreCount: 1 + adminPassword: + k8sSecret: + name: admin-password + dataStorageSizeInTBs: 1 + dbWorkload: OLTP + cloneType: METADATA + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_clone.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +Now, you can verify that a cloned database with name "ClonedADB" is being provisioned on the Cloud Console. + +## Roles and Privileges requirements for Oracle Autonomous Database Controller + +Autonomous Database controller uses Kubernetes objects such as: + + | Resources | Verbs | + | --- | --- | + | Configmaps | get list watch create update patch delete | + | Secrets | get list watch create update patch delete | + | Events | create patch | + +The definition of all the Kubernetes Objects, which are to be used by the Oracle Autonomous Database Controller, comes from the `oracle-database-operator.yaml` file which is applied to deploy the **Oracle Database Operator**. 
+ +## Debugging and troubleshooting + +### Show the details of the resource + +If you edit and reapply the `.yaml` file, then the Autonomous Database controller will only update the parameters that the file contains. The parameters that are not in the file will not be updated. To obtain the verbose output of the current spec, use the following command: + +```sh +kubectl describe adb/autonomousdatabase-sample +``` + +If any error occurs during the reconciliation loop, then the operator reports the error using the resource's event stream, which shows up in kubectl describe output. + +### Check the logs of the pod where the operator deploys + +To check the logs, use these steps: + +1. List the pod replicas + + ```sh + kubectl get pods -n oracle-database-operator-system + ``` + +2. Use the following command to check the logs of the Pod that has a failure + + ```sh + kubectl logs -f pod/oracle-database-operator-controller-manager-78666fdddb-s4xcm -n oracle-database-operator-system + ``` diff --git a/docs/dbcs/README.md b/docs/dbcs/README.md new file mode 100644 index 00000000..2c06511c --- /dev/null +++ b/docs/dbcs/README.md @@ -0,0 +1,185 @@ +# Using the DB Operator Oracle Base Database Service (OBDS) Controller + +Oracle Cloud Infrastructure (OCI) Oracle Base Database Service (OBDS) provides single-node Database (DB) systems, deployed on virtual machines, and provides two-node Oracle Real Application Clusters (Oracle RAC) database systems on virtual machines. + +The single-node DB systems and Oracle RAC systems on virtual machines are [co-managed Oracle Database cloud solutions](https://docs.oracle.com/en-us/iaas/Content/Database/Concepts/overview.htm). To manage the lifecycle of an OCI OBDS system, you can use the OCI Console, the REST API, or the Oracle Cloud Infrastructure command-line interface (CLI). At the granular level, you can use the Oracle Database CLI (DBCLI), Oracle Enterprise Manager, or Oracle SQL Developer. 
+ +The Oracle DB Operator Oracle Base Database Service (OBDS) Controller is a feature of the Oracle DB Operator for Kubernetes (OraOperator) which uses OCI's Oracle Base Database Service OBDS service to support lifecycle management of the database systems. + +Note: Oracle Base Database Cloud Service (OBDS) was previously known as Database Cloud Service (DBCS). + +# Supported Database Editions and Versions + +All single-node OCI Oracle RAC DB systems support the following Oracle Database editions: + +- Standard Edition +- Enterprise Edition +- Enterprise Edition - High Performance +- Enterprise Edition - Extreme Performance + + +Two-node Oracle RAC DB systems require Oracle Enterprise Edition - Extreme Performance. + +For standard provisioning of DB systems (using Oracle Automatic Storage Management (ASM) as your storage management software), the following database releases are supported: + +- Oracle Database 23ai +- Oracle Database 19c + +For fast provisioning of single-node virtual machine database systems (using Logical Volume Manager as your storage management software), the following database releases are supported: + +- Oracle Database 23ai +- Oracle Database 19c + + +# Oracle DB Operator Oracle Base Database Service (OBDS) Controller Deployment + +To deploy Oracle Database Operator (`OraOperator`), use the [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) step-by-step procedure. + +After the Oracle Database Operator is deployed, you can see the DB operator pods running in the Kubernetes Cluster. As part of the `OraOperator` deployment, the OBDS Controller is deployed as a CRD (Custom Resource Definition). 
The following screen output is an example of such a deployment: +```bash +[root@test-server oracle-database-operator]# kubectl get ns +NAME STATUS AGE +cert-manager Active 33d +default Active 118d +kube-node-lease Active 118d +kube-public Active 118d +kube-system Active 118d +oracle-database-operator-system Active 10m <<<< namespace to deploy the Oracle Database Operator + + +[root@test-server oracle-database-operator]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-678f96f5f4-f4rhq 1/1 Running 0 10m +pod/oracle-database-operator-controller-manager-678f96f5f4-plxcp 1/1 Running 0 10m +pod/oracle-database-operator-controller-manager-678f96f5f4-qgcg8 1/1 Running 0 10m + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.197.164 8443/TCP 11m +service/oracle-database-operator-webhook-service ClusterIP 10.96.35.62 443/TCP 11m + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 11m + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-6657bfc664 0 0 0 11m +replicaset.apps/oracle-database-operator-controller-manager-678f96f5f4 3 3 3 10m + +[root@test-server oracle-database-operator]# kubectl get crd +NAME CREATED AT +autonomousdatabasebackups.database.oracle.com 2022-02-08T18:28:55Z +autonomousdatabaserestores.database.oracle.com 2022-02-08T18:28:55Z +autonomousdatabases.database.oracle.com 2022-02-22T23:23:25Z +certificaterequests.cert-manager.io 2022-02-22T23:21:35Z +certificates.cert-manager.io 2022-02-22T23:21:36Z +challenges.acme.cert-manager.io 2022-02-22T23:21:36Z +clusterissuers.cert-manager.io 2022-02-22T23:21:36Z +dbcssystems.database.oracle.com 2022-02-22T23:23:25Z <<<< CRD for OBDS Controller +issuers.cert-manager.io 2022-02-22T23:21:36Z +orders.acme.cert-manager.io 2022-02-22T23:21:37Z 
+shardingdatabases.database.oracle.com 2022-02-22T23:23:25Z +singleinstancedatabases.database.oracle.com 2022-02-22T23:23:25Z +``` + + +# Prerequisites to deploy a OBDS system using Oracle DB Operator OBDS Controller + +Before you deploy an OBDS system in OCI using the Oracle DB Operator OBDS Controller, complete the following procedure. + +**CAUTION :** You must make the changes specified in this section before you proceed to the next section. + +## 1. Create a Kubernetes Configmap. In this example, we create a Kubernetes Configmap named `oci-cred` with the OCI account we are using: + +```bash +kubectl create configmap oci-cred \ +--from-literal=tenancy= \ +--from-literal=user= \ +--from-literal=fingerprint= \ +--from-literal=region=us-phoenix-1 +``` + + +## 2. Create a Kubernetes secret `oci-privatekey` using the OCI Pem key taken from OCI console for the account you are using: + +```bash +#---assuming the OCI Pem key to be "/root/.oci/oci_api_key.pem" + +kubectl create secret generic oci-privatekey --from-file=privatekey=/root/.oci/oci_api_key.pem +``` + + +## 3. Create a Kubernetes secret named `admin-password`; this password must meet the minimum password requirements for the OCI OBDS Service. +For example: + +```bash +#-- assuming the password has been added to a text file named "admin-password": + +kubectl create secret generic admin-password --from-file=./admin-password -n default +``` + + +## 4. Create a Kubernetes secret named `tde-password`; this password must meet the minimum password requirements for the OCI OBDS Service. +For example: + +```bash +# -- assuming the password has been added to a text file named "tde-password": + +kubectl create secret generic tde-password --from-file=./tde-password -n default +``` + + +## 5. 
Create an SSH key pair, and use its public key to create a Kubernetes secret named `oci-publickey`; the private key for this public key can be used later to access the OBDS system's host machine using SSH: + +```bash +[root@test-server OBDS]# ssh-keygen -N "" -C "DBCS_System"-`date +%Y%m` -P "" +Generating public/private rsa key pair. +Enter file in which to save the key (/root/.ssh/id_rsa): +Your identification has been saved in /root/.ssh/id_rsa. +Your public key has been saved in /root/.ssh/id_rsa.pub. +The key fingerprint is: +SHA256:+SuiES/3m9+iuIVyG/QBQL1x7CfRsxtvswBsaBuW5iE DBCS_System-202203 +The key's randomart image is: ++---[RSA 2048]----+ +| .o. . . | +| .o + o | +| .O . o | +| E X.*.+ | +| .*.=S+ + | +| +oo oo + | +| + * o .o o | +| *.*...o. | +| ..+o==o.. | ++----[SHA256]-----+ + + +[root@test-server OBDS]# kubectl create secret generic oci-publickey --from-file=publickey=/root/DBCS/id_rsa.pub +``` + +# Use Cases to manage the lifecycle of an OCI OBDS System with Oracle DB Operator OBDS Controller + +For more information about the multiple use cases available to you to deploy and manage the OCI OBDS Service-based database using the Oracle DB Operator OBDS Controller, review this list: + +[1. Deploy a DB System using OCI OBDS Service with minimal parameters](./provisioning/dbcs_service_with_minimal_parameters.md) +[2. Binding to an existing OBDS System already deployed in OCI Oracle Base Database Service](./provisioning/bind_to_existing_dbcs_system.md) +[3. Scale UP the shape of an existing OBDS System](./provisioning/scale_up_dbcs_system_shape.md) +[4. Scale DOWN the shape of an existing OBDS System](./provisioning/scale_down_dbcs_system_shape.md) +[5. Scale UP the storage of an existing OBDS System](./provisioning/scale_up_storage.md) +[6. Update License type of an existing OBDS System](./provisioning/update_license.md) +[7. Terminate an existing OBDS System](./provisioning/terminate_dbcs_system.md) +[8. 
Create OBDS with All Parameters with Storage Management as LVM](./provisioning/dbcs_service_with_all_parameters_lvm.md) +[9. Create OBDS with All Parameters with Storage Management as ASM](./provisioning/dbcs_service_with_all_parameters_asm.md) +[10. Deploy a 2 Node RAC DB System using OCI OBDS Service](./provisioning/dbcs_service_with_2_node_rac.md) +[11. Create PDB to an existing OBDS System already deployed in OCI OBDS Service](./provisioning/create_pdb_to_existing_dbcs_system.md) +[12. Create OBDS with PDB in OCI](./provisioning/create_dbcs_with_pdb.md) +[13. Create OBDS with KMS Vault Encryption in OCI](./provisioning/create_dbcs_with_kms.md) +[14. Migrate to KMS vault from TDE Wallet password encryption of an existing OBDS System already deployed in OCI Base OBDS Service](./provisioning/migrate_to_kms.md) +[15. Clone DB System from Existing DB System in OCI OBDS Service](./provisioning/clone_from_existing_dbcs.md) +[16. Clone DB System from Backup of Existing DB System in OCI OBDS Service](./provisioning/clone_from_backup_dbcs.md) +[17. Clone DB System from Existing Database of DB System in OCI OBDS Service](./provisioning/clone_from_database.md) + +## Connecting to OCI OBDS database deployed using Oracle DB Operator OBDS Controller + +After you have deployed the OCI OBDS database with the Oracle DB Operator OBDS Controller, you can connect to the database. To see how to connect and use the database, refer to the steps in [Database Connectivity](./provisioning/database_connection.md). + +## Known Issues + +If you encounter any issues with deployment, refer to the list of [Known Issues](./provisioning/known_issues.md) for an OCI OBDS System deployed using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/bind_to_existing_dbcs_system.md b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.md new file mode 100644 index 00000000..eced7538 --- /dev/null +++ b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.md @@ -0,0 +1,32 @@ +# Binding to an existing OBDS System already deployed in OCI Oracle Base Database Service + +In this use case, we bind the Oracle DB Operator OBDS Controller to an existing OCI OBDS System which has already been deployed earlier. This will help to manage the life cycle of that OBDS System using the Oracle DB Operator OBDS Controller. + +**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-obds-system-using-oracle-db-operator-obds-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `bind_to_existing_dbcs_system.yaml` to bind to an existing OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred-mumbai` +- OCI Secret as `oci-privatekey` +- OCID of the existing OBDS System as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` + + +Use the file: [bind_to_existing_dbcs_system.yaml](./bind_to_existing_dbcs_system.yaml) for this use case as below: + +1. Deploy the .yaml file: +```bash +kubectl apply -f bind_to_existing_dbcs_system.yaml +dbcssystem.database.oracle.com/dbcssystem-existing created +``` + +2. Monitor the Oracle DB Leader Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. 
+ +```bash +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./bind_to_existing_dbcs_system_sample_output.log) is the sample output for binding to an existing OBDS System already deployed in OCI using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/bind_to_existing_dbcs_system.yaml b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.yaml new file mode 100644 index 00000000..6ff24bc8 --- /dev/null +++ b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.yaml @@ -0,0 +1,8 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" diff --git a/docs/dbcs/provisioning/bind_to_existing_dbcs_system_sample_output.log b/docs/dbcs/provisioning/bind_to_existing_dbcs_system_sample_output.log new file mode 100644 index 00000000..f6505337 --- /dev/null +++ b/docs/dbcs/provisioning/bind_to_existing_dbcs_system_sample_output.log @@ -0,0 +1,108 @@ +[root@docker-test-server test]# cat bind_to_existing_dbcs_system.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl apply -f bind_to_existing_dbcs_system.yaml +dbcssystem.database.oracle.com/dbcssystem-existing created +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl get ns + +kubectl get allNAME STATUS AGE +cert-manager Active 13d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d 
+oracle-database-operator-system Active 13d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 13d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. 
+2022-03-08T23:27:48.625Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:27:52.513Z INFO controller-runtime.manager.controller.dbcssystem Sync information from remote DbcsSystem System successfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} + +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"","availabilityDomain":"","subnetId":"","shape":"","hostName":"","dbAdminPaswordSecret":"","dbBackupConfig":... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-12-06T15:16:07Z + Generation: 1 + Resource Version: 116146012 + UID: 375b1bea-9b69-4b86-a2b1-fe7750608913 +Spec: + Db System: + Availability Domain: + Compartment Id: + Db Admin Pasword Secret: + Db Backup Config: + Host Name: + Kms Config: + Shape: + Subnet Id: + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htya6crmqdnyz5h7ngpi4azbhndm6ssdmyn7yxk2uhbvxala + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htya6crmqdnyz5h7ngpi4azbhndm6ssdmyn7yxk2uhbvxala + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: 
oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Reco Storage Size In GB: 256 + Shape: VM.Standard.E5.Flex + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljro3fhuxevjwxlue5gqq63q7rd7uhub2ru6gd6ay6k35f4hdeqqxkq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-12-06 12:12:04.031 +0000 UTC + Time Finished: 2024-12-06 13:01:20.457 +0000 UTC + Time Started: 2024-12-06 12:12:11.041 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system.yaml b/docs/dbcs/provisioning/clone_dbcs_system.yaml new file mode 100644 index 00000000..fd6cc1d4 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system.yaml @@ -0,0 +1,20 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-clone + namespace: default +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyaqui4hoqdyzmzl65jwkncyp3bnohengniqienetsdzw2q" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + setupDBCloning: true + dbClone: + dbAdminPasswordSecret: "admin-password" + dbName: "db1212" + hostName: "host1213" + displayName: "dbsystem01312" + licenseModel: "BRING_YOUR_OWN_LICENSE" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_backup.yaml b/docs/dbcs/provisioning/clone_dbcs_system_from_backup.yaml new file mode 100644 index 00000000..54280af9 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_backup.yaml @@ -0,0 +1,22 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-clone + namespace: default +spec: + ociConfigMap: "oci-cred" + 
ociSecret: "oci-privatekey" + setupDBCloning: true + dbBackupId: "ocid1.dbbackup.oc1.ap-mumbai-1.anrg6ljrabf7htyaae3fmnpacavkuwt2zqaj5q3gol2g6m6tirriveytoarq" + dbClone: + dbAdminPasswordSecret: "admin-password" + tdeWalletPasswordSecret: "tde-password" + dbName: "db1212" + hostName: "host1213" + displayName: "dbsystem01312" + licenseModel: "BRING_YOUR_OWN_LICENSE" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + initialDataStorageSizeInGB: 256 \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_backup_sample_output.log b/docs/dbcs/provisioning/clone_dbcs_system_from_backup_sample_output.log new file mode 100644 index 00000000..82531993 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_backup_sample_output.log @@ -0,0 +1,75 @@ +2024-09-18T12:55:33Z INFO Starting the clone process for DBCS from backup {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df", "dbcs": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-clone"}} +2024-09-18T12:55:33Z INFO Retrieved existing Db System Details from OCI using Spec.Id {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:55:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:56:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:57:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:58:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:59:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:00:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:01:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:02:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:03:48Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:04:49Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:05:50Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:06:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:07:52Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:08:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:09:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:10:54Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:11:55Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:12:56Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:13:57Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:14:58Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:15:59Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:17:00Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:18:01Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:19:02Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:20:02Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:21:03Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:22:05Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:23:05Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:24:06Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:25:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:26:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:27:09Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:28:10Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:29:11Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:30:12Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:31:13Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:32:14Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:33:15Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:34:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:35:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:36:17Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:37:18Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:38:19Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:39:20Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:40:21Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:41:22Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:42:23Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:43:23Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:44:24Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:45:25Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:46:26Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:47:27Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:48:28Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:49:29Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:50:30Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:51:31Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:52:32Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:53:32Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:54:33Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:55:34Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:56:35Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:57:36Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:58:37Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:59:38Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:00:39Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:01:40Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:02:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:03:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:04:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:05:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:06:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:07:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:08:46Z INFO DB Cloning completed successfully from provided backup DB system. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_database.yaml b/docs/dbcs/provisioning/clone_dbcs_system_from_database.yaml new file mode 100644 index 00000000..40767739 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_database.yaml @@ -0,0 +1,22 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-clone + namespace: default +spec: + databaseId: "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyapxtsgw6hy3kyosmrawefq2csm4kjv4d5au7biuiaabsq" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + setupDBCloning: true + dbClone: + dbAdminPasswordSecret: "admin-password" + tdeWalletPasswordSecret: "tde-password" + dbName: "db1212" + hostName: "host1213" + displayName: "dbsystem01312" + licenseModel: "BRING_YOUR_OWN_LICENSE" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: 
"ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + initialDataStorageSizeInGB: 256 \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_database_sample_output.log b/docs/dbcs/provisioning/clone_dbcs_system_from_database_sample_output.log new file mode 100644 index 00000000..2881051d --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_database_sample_output.log @@ -0,0 +1,39 @@ +2024-09-19T19:23:08Z INFO Starting the clone process for Database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "4c2b2567-052a-4a27-ae96-e18f655577d1", "dbcs": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-clone"}} +2024-09-19T19:23:08Z INFO Retrieved passwords from Kubernetes secrets {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "4c2b2567-052a-4a27-ae96-e18f655577d1"} +2024-09-19T19:23:09Z INFO Retrieved existing Database details from OCI {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "4c2b2567-052a-4a27-ae96-e18f655577d1", "DatabaseId": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyapxtsgw6hy3kyosmrawefq2csm4kjv4d5au7biuiaabsq"} +2024-09-20T08:51:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:52:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:53:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:54:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:55:48Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:56:49Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:57:50Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:58:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:59:52Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:00:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:01:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:02:54Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:03:55Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:04:56Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:52:39Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:53:40Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:54:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:55:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:56:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:57:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:58:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:59:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T10:00:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T10:01:47Z INFO DB Cloning completed successfully from provided backup DB system {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_sample_output.log b/docs/dbcs/provisioning/clone_dbcs_system_sample_output.log new file mode 100644 index 00000000..22d86e1e --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_sample_output.log @@ -0,0 +1,60 @@ +2024-09-17T11:40:26Z INFO Starting the clone process for DBCS {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", 
"reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3", "dbcs": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-clone.yaml"}} +2024-09-17T11:40:26Z INFO Retrieved passwords from Kubernetes secrets {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:40:26Z INFO Retrieved existing Db System Details from OCI using Spec.Id {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:40:33Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:41:33Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:42:34Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:43:35Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:44:36Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:45:37Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:46:38Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:47:39Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:48:40Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:49:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:50:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:51:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:52:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:53:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:54:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:55:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:56:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:57:48Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:58:49Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:59:50Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:00:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:01:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:02:52Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:03:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:04:54Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:05:55Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:06:56Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:07:57Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:08:58Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:09:59Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:11:00Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:12:01Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:13:01Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:14:02Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:15:03Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:16:04Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:17:05Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:18:06Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:19:07Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:20:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:21:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:22:09Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:23:10Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:24:11Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:25:12Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:26:13Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:27:14Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:28:15Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:29:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:30:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:31:17Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:32:18Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:33:19Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:34:20Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:35:21Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:36:22Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:36:22Z INFO DB Cloning completed successfully from provided db system {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} diff --git a/docs/dbcs/provisioning/clone_from_backup_dbcs.md b/docs/dbcs/provisioning/clone_from_backup_dbcs.md new file mode 100644 index 00000000..4597cff7 --- /dev/null +++ b/docs/dbcs/provisioning/clone_from_backup_dbcs.md @@ -0,0 +1,36 @@ +# Clone DB System from Backup of Existing DB System in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier with the Backup is going to be cloned. + +In order to clone OBDS to an existing OBDS system using Backup, get the details of OCID of backup in OCI OBDS. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `clone_dbcs_system_from_backup.yaml` to clone a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- setupDBCloning: as `true` +- OCID of Backup DB as `dbBackupId` of existing OBDS system. 
+- Specification for DB Cloning as `dbClone`-> `dbAdminPasswordSecret`,`tdeWalletPasswordSecret`, `dbName`,`hostName`,`displayName`,`licenseModel`,`domain`,`sshPublicKeys`,`subnetId`, `initialDataStorageSizeInGB` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [clone_dbcs_system_from_backup.yaml](./clone_dbcs_system_from_backup.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f clone_dbcs_system_from_backup.yaml +dbcssystem.database.oracle.com/dbcssystem-clone created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB cloning. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./clone_dbcs_system_from_backup_sample_output.log) is the sample output for cloning an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/clone_from_database.md b/docs/dbcs/provisioning/clone_from_database.md new file mode 100644 index 00000000..05b294b5 --- /dev/null +++ b/docs/dbcs/provisioning/clone_from_database.md @@ -0,0 +1,35 @@ +# Clone DB System from Existing Database of DB System in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier with an existing Database is going to be cloned in OCI Base OBDS Service using the existing Database ID. + +As a pre-requisite, get the OCID of the database of the existing OBDS System which you want to clone. 
+ +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `clone_dbcs_system_from_database.yaml` to clone a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: +- OCID of existing as `databaseId` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- setupDBCloning: as `true` +- Specification of dbClone as - Details of new DB system for cloning `dbAdminPasswordSecret`,`tdeWalletPasswordSecret`, `dbName`,`hostName`,`displayName`,`licenseModel`,`domain`,`sshPublicKeys`,`subnetId`, `initialDataStorageSizeInGB` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [clone_dbcs_system_from_database.yaml](./clone_dbcs_system_from_database.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f clone_dbcs_system_from_database.yaml +dbcssystem.database.oracle.com/dbcssystem-clone created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./clone_dbcs_system_from_database_sample_output.log) is the sample output for cloning an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/clone_from_existing_dbcs.md b/docs/dbcs/provisioning/clone_from_existing_dbcs.md new file mode 100644 index 00000000..61665188 --- /dev/null +++ b/docs/dbcs/provisioning/clone_from_existing_dbcs.md @@ -0,0 +1,36 @@ +# Clone DB System from Existing DB System in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier is going to be cloned in OCI Oracle Base Database System (OBDS). Its a 2 Step operation. + +In order to clone OBDS to an existing OBDS system, get the OCID of DB System ID you want to clone. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `clone_dbcs_system.yaml` to clone a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `id` to be cloned. +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- setupDBCloning: as `true` +- Specification of DB System been cloned as `dbClone` -> `dbAdminPaswordSecret`, `dbName`,`hostName`,`displayName`,`licenseModel`,`domain`,`sshPublicKeys`,`subnetId`. These must be unique and new details for new cloned DB system to be created. +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [clone_dbcs_system.yaml](./clone_dbcs_system.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f clone_dbcs_system.yaml +dbcssystem.database.oracle.com/dbcssystem-clone created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. 
+ +``` +[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./clone_dbcs_system_sample_output.log) is the sample output for cloning an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. diff --git a/docs/dbcs/provisioning/create_dbcs_with_kms.md b/docs/dbcs/provisioning/create_dbcs_with_kms.md new file mode 100644 index 00000000..97d912d4 --- /dev/null +++ b/docs/dbcs/provisioning/create_dbcs_with_kms.md @@ -0,0 +1,73 @@ +# Deploy a OBDS DB System alongwith KMS Vault Encryption in OCI + +In this use case, an OCI OBDS system is deployed using Oracle DB Operator OBDS controller along with KMS Vault configuration + +**NOTE** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +## Pre-requisites for KMS Vaults related to OBDS System +There is also other set of pre-requisites for KMS Vaults related to dynamic group and policies. Please follow instructions for same. +1. Create Dynamic group with rule `ALL {resource.compartment.id =` and give it some name. +2. Create policy in your compartment for this dynamic group to access to key/vaults by database. 
+ +```txt +Allow dynamic-group <> to manage secret-family in compartment <> +Allow dynamic-group <> to manage instance-family in compartment <> +Allow dynamic-group <> to manage database-family in compartment <> +Allow dynamic-group <> to manage keys in compartment <> +Allow dynamic-group <> to manage vaults in compartment <> +``` + +E.g + +```txt +ALL {resource.compartment.id = 'ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a'} +``` +```txt +Allow dynamic-group db_dynamic_group to manage secret-family in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage instance-family in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage database-family in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage keys in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage vaults in compartment sauahuja +``` +3. Do also create KMS Vault and KMS Key in order to use it during OBDS provisioning. We are going to refer those variables (`vaultName`, `keyName`) in the yaml file. + +This example uses `dbcs_service_with_kms.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Name as `kmsdb` +- Oracle Database Software Image Version as `19c` +- Database Workload Type as Transaction Processing i.e. 
`OLTP` +- Database Hostname Prefix as `kmshost` +- Oracle VMDB Shape as `VM.Standard2.2` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- domain `subdda0b5eaa.cluster1.oraclevcn.com` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` +- KMS Vault Name as `dbvault` +- KMS Compartment Id as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- KMS Key Name as `dbkey` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). While giving KMS Vault make sure not to pass TDE wallet password in DB creation as either of them can be only used for encryption. + +Use the file: [dbcs_service_with_kms.yaml](./dbcs_service_with_kms.yaml) for this use case as below: + +1. Deploy the .yaml file: +```bash +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_kms.yaml +dbcssystem.database.oracle.com/dbcssystem-create created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. + +```bash +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_with_kms_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with KMS configurations. 
diff --git a/docs/dbcs/provisioning/create_dbcs_with_pdb.md b/docs/dbcs/provisioning/create_dbcs_with_pdb.md new file mode 100644 index 00000000..d68a1991 --- /dev/null +++ b/docs/dbcs/provisioning/create_dbcs_with_pdb.md @@ -0,0 +1,55 @@ +# Deploy a OBDS DB System using OCI Oracle Base Database System (OBDS) alongwith PDB + +In this use case, an OCI OBDS system is deployed using Oracle DB Operator OBDS controller along with PDB configuration + +**NOTE** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +Also, create a Kubernetes secret `pdb-password` using the file: + +```bash +#---assuming the PDB password is in ./pdb-password file" + +kubectl create secret generic pdb-password --from-file=./pdb-password -n default +``` + +This example uses `dbcs_service_with_pdb.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:US-ASHBURN-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Name as `dbsystem24` +- Oracle Database Software Image Version as `21c` +- Database Workload Type as Transaction Processing i.e. 
`OLTP` +- Database Hostname Prefix as `host24` +- Cpu Core Count as `1` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- domain `subd215df3e6.k8stest.oraclevcn.com` +- OCID of the Subnet as `ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua` +- PDB Name as `pdb_sauahuja_11` +- TDE Wallet Password as `tde-password` +- PDB Admin Password as `pdb-password` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [dbcs_service_with_pdb.yaml](./dbcs_service_with_pdb.yaml) for this use case as below: + +1. Deploy the .yaml file: +```bash +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_pdb.yaml +dbcssystem.database.oracle.com/dbcssystem-create-with-pdb created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. + +```bash +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_with_pdb_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with PDB configurations. diff --git a/docs/dbcs/provisioning/create_kms.md b/docs/dbcs/provisioning/create_kms.md new file mode 100644 index 00000000..43db7037 --- /dev/null +++ b/docs/dbcs/provisioning/create_kms.md @@ -0,0 +1,50 @@ +# Create and update KMS vault to an existing DBCS System already deployed in OCI Base DBCS Service + +In this use case, an existing OCI DBCS system deployed earlier is going to have KMS Vault created and update DBCS System in OCI. Its a 2 Step operation. 
+ +In order to create KMS Vaults to an existing DBCS system, the steps will be: + +1. Bind the existing DBCS System to DBCS Controller. +2. Apply the change to create KMS Vaults. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +As step 1, first bind the existing DBCS System to DBCS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below- +```bash +kubectl get dbcssystems +NAME AGE +dbcssystem-existing 3m33s +``` +Before proceeding further, create the PDB Admin Password, which is going to be used as the name suggests. + + +This example uses `dbcs_service_with_kms.yaml` to create KMS Vault to existing DBCS VMDB using Oracle DB Operator DBCS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Existing `dbSystem` used before to create DBCS system. +- kmsConfig - vaultName as "basdbvault" as an example. +- kmsConfig - keyName as "dbvaultkey" as an example. +- kmsConfig - compartmentId as "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" as an example. +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [dbcs_service_with_kms.yaml](./dbcs_service_with_kms.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f dbcs_service_with_kms.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. 
Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB creation of KMS Vaults. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./createkms_in_existing_dbcs_system_sample_output.log) is the sample output for creation of KMS Vaults on an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. diff --git a/docs/dbcs/provisioning/create_pdb.md b/docs/dbcs/provisioning/create_pdb.md new file mode 100644 index 00000000..610ccd41 --- /dev/null +++ b/docs/dbcs/provisioning/create_pdb.md @@ -0,0 +1,55 @@ +# Create PDB to an existing DBCS System + +In this use case, an existing OCI DBCS system deployed earlier is going to have PDB/PDBs created. Its a 2 Step operation. + +In order to create PDBs to an existing DBCS system, the steps will be: + +1. Bind the existing DBCS System to DBCS Controller. +2. Apply the change to create PDBs. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +As step 1, first bind the existing DBCS System to DBCS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below- +```bash +kubectl get dbcssystems +NAME AGE +dbcssystem-existing 3m33s +``` +Below proceeding further create PDB Admin Password which is going to used as name suggests. 
+ +Create a Kubernetes secret `pdb-password` using the file: + +```bash +#---assuming the PDB password is in ./pdb-password file" + +kubectl create secret generic pdb-password --from-file=./pdb-password -n default +``` + +This example uses `createpdb_in_existing_dbcs_system_list.yaml` to scale up a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- TDE Wallet Password as `tde-password` +- PDB Admin Password as `pdb-password` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [createpdb_in_existing_dbcs_system_list.yaml](./createpdb_in_existing_dbcs_system_list.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f createpdb_in_existing_dbcs_system_list.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./createpdb_in_existing_dbcs_system_list_sample_output.log) is the sample output for creation of PDBs on an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. 
diff --git a/docs/dbcs/provisioning/create_pdb_to_existing_dbcs_system.md b/docs/dbcs/provisioning/create_pdb_to_existing_dbcs_system.md new file mode 100644 index 00000000..d1c4ed5b --- /dev/null +++ b/docs/dbcs/provisioning/create_pdb_to_existing_dbcs_system.md @@ -0,0 +1,55 @@ +# Create PDB to an existing OBDS System + +In this use case, an existing OCI OBDS system deployed earlier is going to have a PDB/many PDBs created. Its a 2 Step operation. + +In order to create PDBs to an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System to OBDS Controller. +2. Apply the change to create PDBs. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +As step 1, first bind the existing OBDS System to OBDS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below- +```bash +kubectl get dbcssystems +NAME AGE +dbcssystem-existing 3m33s +``` +Below proceeding further create PDB Admin Password which is going to used as name suggests. 
+ +Create a Kubernetes secret `pdb-password` using the file: + +```bash +#---assuming the PDB password is in ./pdb-password file" + +kubectl create secret generic pdb-password --from-file=./pdb-password -n default +``` + +This example uses `createpdb_in_existing_dbcs_system_list.yaml` to scale up a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htya55wz5vfil7ul3pkzpubnymp6zrp3fhgomv3fcdr2vtiq` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- TDE Wallet Password as `tde-password` +- PDB Admin Password as `pdb-password` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [createpdb_in_existing_dbcs_system_list.yaml](./createpdb_in_existing_dbcs_system_list.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f createpdb_in_existing_dbcs_system_list.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./createpdb_in_existing_dbcs_system_list_sample_output.log) is the sample output for creation of PDBs on an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/createkms_in_existing_dbcs_system_sample_output.log b/docs/dbcs/provisioning/createkms_in_existing_dbcs_system_sample_output.log new file mode 100644 index 00000000..18ac916e --- /dev/null +++ b/docs/dbcs/provisioning/createkms_in_existing_dbcs_system_sample_output.log @@ -0,0 +1 @@ +# To be added \ No newline at end of file diff --git a/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list.yaml b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list.yaml new file mode 100644 index 00000000..589ce0cf --- /dev/null +++ b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list.yaml @@ -0,0 +1,27 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + pdbConfigs: + - pdbName: "pdb_sauahuja_sdk_13" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "Finance" + - pdbName: "pdb_sauahuja_sdk_14" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "HR" + - pdbName: "pdb_sauahuja_sdk_15" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "IT" \ No newline at end of file diff --git a/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list_sample_output.log b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list_sample_output.log new file mode 100644 index 00000000..9bee73c8 --- /dev/null +++ b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list_sample_output.log @@ -0,0 +1,185 @@ +2024-08-15T14:14:55Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": 
"DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga"]} +2024-08-15T14:14:55Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-existing"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-15T14:14:55Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13"} +2024-08-15T14:14:55Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:14:55Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", 
"controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:14:55Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13"} +2024-08-15T14:14:56Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq"} +2024-08-15T14:14:56Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:15:26Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": 
"PROVISIONING"} +2024-08-15T14:15:57Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:16:27Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:16:57Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:17:27Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:17:57Z INFO Checking pluggable database status {"controller": 
"dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "AVAILABLE"} +2024-08-15T14:17:57Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq"} +2024-08-15T14:17:59Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga"]} +2024-08-15T14:17:59Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-existing"}, "databaseIds:->": 
"ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-15T14:17:59Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_14"} +2024-08-15T14:17:59Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:17:59Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:18:00Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_14"} +2024-08-15T14:18:00Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", 
"PDBName": "pdb_sauahuja_sdk_14", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq"} +2024-08-15T14:18:01Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:18:31Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:19:01Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:19:31Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": 
"ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:20:01Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:20:32Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:21:02Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "AVAILABLE"} +2024-08-15T14:21:02Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_14", "PDBID": 
"ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq"} +2024-08-15T14:21:03Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga"]} +2024-08-15T14:21:03Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-existing"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-15T14:21:03Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15"} +2024-08-15T14:21:03Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": 
"default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:21:03Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:21:04Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15"} +2024-08-15T14:21:05Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla"} +2024-08-15T14:21:05Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:21:35Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:22:05Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:22:36Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:23:06Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "AVAILABLE"} +2024-08-15T14:23:06Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": 
"dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla"} + + +# kubectl describe dbcssystems.database.oracle.com +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"","availabilityDomain":"","subnetId":"","shape":"","hostName":"","dbAdminPaswordSecret":"","dbBackupConfig":... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-12-10T10:54:17Z + Generation: 4 + Resource Version: 117823935 + UID: c9da1245-3582-4926-b311-c24d75e75003 +Spec: + Db System: + Availability Domain: + Compartment Id: + Db Admin Pasword Secret: + Db Backup Config: + Host Name: + Kms Config: + Shape: + Subnet Id: + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey + Pdb Configs: + Freeform Tags: + Department: Finance + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_sdk_13 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password + Freeform Tags: + Department: HR + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_sdk_14 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password + Freeform Tags: + Department: IT + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_sdk_15 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 512 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License 
Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Department: IT + Pdb Name: pdb_sauahuja_sdk_15 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyazfddwgjlmpm3tctcnmqe7zwefzghr4wttij6u4lhh7bq + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Pdb Config Status: + Freeform Tags: + Department: Finance + Pdb Name: pdb_sauahuja_sdk_13 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakkcrbhf6cit3z2hbcvded5g2rc7r5obbxeax7dv527xq + Pdb Config Status: + Freeform Tags: + Department: HR + Pdb Name: pdb_sauahuja_sdk_14 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyaqnht5ctcopuntaj74ptum27tbdk5rouvnfq5f2y3eyna + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrtpnjzjidageolva6ytlzjfb2lqhbbrivm4lsb67xyjzyyke6bt4a + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:53.547 +0000 UTC + Time Finished: 2024-12-10 09:14:04.572 +0000 UTC + Time Started: 2024-12-10 08:57:57.588 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxg7gov22vlcbqbnxrkl7t7xkcfya6w6gvck344jdf5vtqgw5wzgq + Operation Type: Update DB System + Percent 
Complete: 100 + Time Accepted: 2024-12-10 08:57:43.701 +0000 UTC + Time Finished: 2024-12-10 09:14:22.705 +0000 UTC + Time Started: 2024-12-10 08:57:53.873 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr4qmf6rdtcbrc5p2q7bev3igugtpgfbwc2laht22yyjzr2srrg7vq + Operation Type: Update DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 10:57:27.313 +0000 UTC + Time Finished: 2024-12-10 11:15:50.597 +0000 UTC + Time Started: 2024-12-10 10:57:45.242 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr2vehqv3vgrxr5mrmd6hoqxg2zr6m5eaunv3ip6bcrubcpvhudmia + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 10:57:44.95 +0000 UTC + Time Finished: 2024-12-10 11:15:40.364 +0000 UTC + Time Started: 2024-12-10 10:57:54.082 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr36bt7ot5oq3otch4bu2axn3azkicot4zuwgwmxeupxr4siisydja + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2024-12-10 11:44:49.369 +0000 UTC + Time Finished: 2024-12-10 11:58:45.01 +0000 UTC + Time Started: 2024-12-10 11:44:55.544 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxdpmmaipuqke5yx3szyfnf2zwkfptz3jevlq3coicecfjihnm4kq + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2024-12-10 11:44:55.255 +0000 UTC + Time Finished: 2024-12-10 11:58:25.229 +0000 UTC + Time Started: 2024-12-10 11:44:57.743 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/database_connection.md b/docs/dbcs/provisioning/database_connection.md new file mode 100644 index 
00000000..66ac2b5d --- /dev/null +++ b/docs/dbcs/provisioning/database_connection.md @@ -0,0 +1,53 @@ +## Database connection + +In order to retrieve the database connection, use the kubectl describe command + +```sh +kubectl describe dbcssystems.database.oracle.com dbcssystem-create +``` + +You can use the following script (tnsalias.awk) to get a simple tnsalias + +```awk +#!/usr/bin/awk -f +( $0 ~ / Db Unique Name:/ ) { DB_UNIQUE_NAME=$4 } +( $0 ~ /Domain Name:/ ) { DB_DOMAIN=$3 } +( $0 ~ /Host Name:/ ) { HOSTNAME=$3 } +( $0 ~ /Listener Port:/ ) { PORT=$3 } + +END { + printf ("db_unique_name=%s\n",DB_UNIQUE_NAME); + printf ("db_domain=%s\n",DB_DOMAIN); + printf ("hostname=%s\n",HOSTNAME); + printf ("port=%s\n",PORT); + printf ("====== TNSALIAS ======\n"); + printf ("(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%s))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=%s.%s)))\n", + HOSTNAME,PORT,DB_UNIQUE_NAME,DB_DOMAIN); } +``` + +```text +kubectl describe dbcssystems.database.oracle.com dbcssystem-create |awk -f tnsalias.awk +db_unique_name=testdb_fg4_lin +db_domain=vcndns.oraclevcn.com +hostname=host1205 +port=1521 +====== TNSALIAS ====== +(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=host1205)(PORT=1521))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=testdb_fg4_lin.vcndns.oraclevcn.com))) + +sqlplus scott@"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=host1205)(PORT=1521))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=testdb_fg4_lin.vcndns.oraclevcn.com)))" + + +SQL*Plus: Release 19.0.0.0.0 - Production on Fri Dec 15 14:16:42 2023 +Version 19.15.0.0.0 + +Copyright (c) 1982, 2022, Oracle. All rights reserved. 
+ +Enter password: +Last Successful login time: Fri Dec 15 2023 14:14:07 +00:00 + +Connected to: +Oracle Database 19c EE High Perf Release 19.0.0.0.0 - Production +Version 19.18.0.0.0 + +SQL> +``` diff --git a/docs/dbcs/provisioning/dbcs_controller_parameters.md b/docs/dbcs/provisioning/dbcs_controller_parameters.md new file mode 100644 index 00000000..96bedf30 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_controller_parameters.md @@ -0,0 +1,33 @@ +# Oracle DB Operator DBCS Controller - Parameters to use in .yaml file + +This page has the details of the parameters to define the specs related to an operation to be performed for an OCI DBCS System to be managed using Oracle DB Operator DBCS Controller. + +| Parameter Name | Description | Mandatory Parameter? (Y/N) | Parameter Value type | Default Value (If Any) | Allowed Values (If Any) | +| -------------- | ---------- | ------- | ------- | ------- | ------- | +| ociConfigMap | Kubernetes Configmap created for OCI account in the prerequisites steps. | Y | String | | | +| ociSecret | Kubernetes Secret created using PEM Key for OCI account in the prerequisites steps. | Y | String | | | +| availabilityDomain | Availability Domain of the OCI region where you want to provision the DBCS System. | Y | String | | Please refer to this link: https://docs.oracle.com/en-us/iaas/Content/General/Concepts/regions.htm | +| compartmentId | OCID of the OCI Compartment. | Y | String | | | +| dbAdminPasswordSecret | Kubernetes Secret created for DB Admin Account in prerequisites steps. | Y | String | | A strong password for SYS, SYSTEM, and PDB Admin. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, #, or -.| +| autoBackupEnabled | Whether to enable automatic backup or not. | N | Boolean | | True or False | +| autoBackupWindow | Time window selected for initiating automatic backup for the database system. 
There are twelve available two-hour time windows. | N | String | | Please refer to this link: https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/DbBackupConfig | +| recoveryWindowsInDays | Number of days between the current and the earliest point of recoverability covered by automatic backups. | N | Integer | | Minimum: 1 and Maximum: 60 | +| dbEdition | Oracle Database Software Edition. | N | String | | STANDARD_EDITION or ENTERPRISE_EDITION or ENTERPRISE_EDITION_HIGH_PERFORMANCE or ENTERPRISE_EDITION_EXTREME_PERFORMANCE | +| dbName | The database name. | Y | String | | The database name cannot be longer than 8 characters. It can only contain alphanumeric characters. | +| dbVersion | The Oracle Database software version. | Y | String | | Min length: 1 and Max length: 255 | +| dbWorkload | The database workload type. | Y | String | | OLTP or DSS | +| diskRedundancy | The type of redundancy configured for the DB system. NORMAL is 2-way redundancy. HIGH is 3-way redundancy. | N | String | | HIGH or NORMAL | +| displayName | The user-friendly name for the DB system. The name does not have to be unique. | N | String | | Min length: 1 and Max length: 255 | +| hostName | The hostname for the DB system. | Y | String | | Hostname can contain only alphanumeric and hyphen (-) characters. | +| initialDataStorageSizeInGB | Size (in GB) of the initial data volume that will be created and attached to a virtual machine DB system. | N | Integer | | Min Value in GB: 2 | +| licenseModel | The Oracle license model that applies to all the databases on the DB system. | N | String | LICENSE_INCLUDED | LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE | +| nodeCount | The number of nodes in the DB system. For RAC DB systems, the value is greater than 1. | N | Integer | | Minimum: 1 | +| pdbName | The name of the pluggable database. The name must begin with an alphabetic character and can contain a maximum of thirty alphanumeric characters. 
Special characters are not permitted. | N | String | | The PDB name can contain only alphanumeric and underscore (_) characters. | +| privateIp | A private IP address of your choice. Must be an available IP address within the subnet's CIDR. If you don't specify a value, Oracle automatically assigns a private IP address from the subnet. | N | String | | Min length: 1 and Max length: 46 | +| shape | The shape of the DB system. The shape determines resources to allocate to the DB system. | Y | String | | Please refer to this link for the available shapes: https://docs.oracle.com/en-us/iaas/Content/Database/Concepts/overview.htm | +| sshPublicKeys | Kubernetes secret created with the Public Key portion of the key pair created to access the DB System. | Y | String | | | +| storageManagement | The storage option used in DB system. ASM - Automatic storage management LVM - Logical Volume management. | N | String | | ASM or LVM | +| subnetId | The OCID of the subnet the DB system is associated with. | Y | String | | | +| tags | Tags for the DB System resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. | N | String | | | +| tdeWalletPasswordSecret | The Kubernetes secret for the TDE Wallet password. | N | String | | | +| timeZone | The time zone of the DB system. 
| N | String | | Please refer to this link: https://docs.oracle.com/en-us/iaas/Content/Database/References/timezones.htm#Time_Zone_Options | diff --git a/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.log b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.log new file mode 100644 index 00000000..2405a90a --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.log @@ -0,0 +1,132 @@ +2025-01-10T14:30:21Z INFO Updating KMS details in Existing Database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO MigrateVaultKey request succeeded, waiting for database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO Starting to wait for the database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "DesiredState": "AVAILABLE", "Timeout": "2h0m0s"} +2025-01-10T14:30:27Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", 
"DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:30:27Z INFO Database not in the desired state yet, waiting... {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:31:28Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:31:28Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:32:29Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:32:29Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:33:30Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:33:30Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:34:31Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:34:31Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:35:32Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:35:32Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:36:33Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:36:33Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:37:34Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:37:34Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:38:35Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:38:35Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:30:21Z INFO Updating KMS details in Existing Database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO MigrateVaultKey request succeeded, waiting for database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO Starting to wait for the database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "DesiredState": "AVAILABLE", "Timeout": "2h0m0s"} +2025-01-10T14:30:27Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": 
"dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:30:27Z INFO Database not in the desired state yet, waiting... {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:31:28Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:31:28Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:32:29Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:32:29Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:33:30Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:33:30Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:34:31Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:34:31Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:35:32Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:35:32Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:36:33Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:36:33Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:37:34Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:37:34Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:38:35Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:38:35Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:40:37Z INFO Database reached the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "State": "AVAILABLE"} +2025-01-10T14:40:39Z INFO KMS migration process completed successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} + + +basedb/ $ kubectl describe dbcssystems.database.oracle.com/dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":""... 
+API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2025-01-10T14:29:37Z + Generation: 2 + Resource Version: 130979222 + UID: f7535120-dd4a-4cbc-9e29-b9f104904773 +Spec: + Db System: + Availability Domain: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Password Secret: admin-password + Db Backup Config: + Host Name: + Kms Config: + Shape: + Subnet Id: + Tde Wallet Password Secret: tde-password + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyafdoaarkuhhxjfgjrzjtxpbcaycib3woadfmcz545mwua + Kms Config: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Name: dbkey + Vault Name: dbvault + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxia3s627hwjr36bix3dnh4dlbny22tzcmb2a3b4rcp74clq + Db Name: cdb12 + Db Unique Name: cdb12_hf8_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq + Display Name: dbsys123 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyafdoaarkuhhxjfgjrzjtxpbcaycib3woadfmcz545mwua + Kms Details Status: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Id: ocid1.key.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljr63rcu5h6lmaeux752pzmp334zihovh3n2acags6zt37emab34yba + Key Name: dbkey + Management Endpoint: https://fbtxxaolaaavw-management.kms.ap-mumbai-1.oraclecloud.com + Vault Id: ocid1.vault.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljrbjokn2fwhh36tqzyog4yjrth3mj2emxea4fxmzw6z35zlmh65p2a + Vault Name: dbvault + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client 
Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host01234 + Listener Port: 1521 + Scan Dns Name: host01234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Created By: MAA_TEAM + TEST: test_case_provision + Pdb Name: PDB0123 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyafnt7gvokjw7cvzs6xjxw5nmlz6awzycqcnf57blcuefa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.2 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljroc46ic555q2rfcwxg3srsbq4indueiuvj7tlziyy63uz3pvpe4ra + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2025-01-10 06:30:12.411 +0000 UTC + Time Finished: 2025-01-10 07:51:04.59 +0000 UTC + Time Started: 2025-01-10 06:30:20.62 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.yaml b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.yaml new file mode 100644 index 00000000..922f7eeb --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.yaml @@ -0,0 +1,16 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyafdoaarkuhhxjfgjrzjtxpbcaycib3woadfmcz545mwua" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + tdeWalletPasswordSecret: "tde-password" + kmsConfig: + vaultName: "dbvault" + compartmentId: 
"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + keyName: "dbkey" \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.md b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.md new file mode 100644 index 00000000..b9ce6931 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.md @@ -0,0 +1,59 @@ +# Deploy a 2 Node RAC DB System using OCI OBDS Service + +In this use case, a 2 Node RAC OCI OBDS system is deployed using Oracle DB Operator OBDS controller using all the available parameters in the .yaml file being used during the deployment. The type of the Storage Management in this case is ASM. + +**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `dbcs_service_with_2_node_rac.yaml` to deploy a 2 Node RAC VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Cluster Name as `maa-cluster` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Enable flag for Automatic Backup for OBDS Database as `True` +- Auto Backup Window for OBDS Database as `SLOT_FOUR` +- Recovery Windows for Backup retention in days as `15` +- Oracle Database Edition as `STANDARD_EDITION` +- Database Name as `db0130` +- Oracle Database Software Image Version as `19c` +- Database Workload Type as Transaction Processing i.e. 
`OLTP`
+- Redundancy of the ASM Disks as `EXTERNAL`
+- Display Name for the OBDS System as `dbsys123`
+- Database Hostname Prefix as `host01234`
+- Initial Size of the DATA Storage in GB as `256`
+- License Model as `BRING_YOUR_OWN_LICENSE`
+- Name of the PDB to be created as `PDB0123`
+- Private IP explicitly assigned to be `10.0.1.99`
+- Node count as `2`
+- Oracle VMDB Shape as `VM.Standard2.1`
+- SSH Public key for the OBDS system being deployed as `oci-publickey`
+- Storage Management type as `ASM`
+- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbv`
+- Tag the OBDS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"`
+- TDE Wallet Secret as `tde-password`
+- Time Zone for the OBDS System as `Europe/Berlin`
+
+
+**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md).
+
+Use the file: [dbcs_service_with_2_node_rac.yaml](./dbcs_service_with_2_node_rac.yaml) for this use case as below:
+
+1. Deploy the .yaml file:
+```sh
+[root@docker-test-server test]# kubectl apply -f dbcs_service_with_2_node_rac.yaml
+dbcssystem.database.oracle.com/dbcssystem-create configured
+```
+
+2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment.
+
+NOTE: Check the DB Operator Pod name in your environment.
+
+```
+[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system
+```
+
+## Sample Output
+
+[Here](./dbcs_service_with_2_node_rac_sample_output.log) is the sample output for a 2 Node RAC OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with all parameters and with Storage Management as ASM. 
diff --git a/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.yaml b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.yaml new file mode 100644 index 00000000..3b4f35e3 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.yaml @@ -0,0 +1,39 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + clusterName: "maa-cluster" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbBackupConfig: + autoBackupEnabled: True + autoBackupWindow: "SLOT_FOUR" + recoveryWindowsInDays: 15 + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" + displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + diskRedundancy: "EXTERNAL" + hostName: "host01234" + initialDataStorageSizeInGB: 256 + licenseModel: "BRING_YOUR_OWN_LICENSE" + nodeCount: 2 + pdbName: "PDB0130" + privateIp: "10.0.1.99" + shape: "VM.Standard2.2" + sshPublicKeys: + - "oci-publickey" + storageManagement: "ASM" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + tags: + "TEST": "test_case_provision" + "CreatedBy": "MAA_TEAM" + tdeWalletPasswordSecret: "tde-password" + timeZone: "Europe/Berlin" diff --git a/docs/dbcs/provisioning/dbcs_service_with_2_node_rac_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac_sample_output.log new file mode 100644 index 00000000..e33e7a86 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac_sample_output.log @@ -0,0 +1,352 @@ +NTERPRISE_EDITION_EXTREME_PERFORMANCE[root@docker-test-server test]# cat dbcs_service_with_2_node_rac.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: 
"oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:PHX-AD-1" + clusterName: "maa-cluster" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" + dbAdminPaswordSecret: "admin-password" + dbBackupConfig: + autoBackupEnabled: True + autoBackupWindow: "SLOT_FOUR" + recoveryWindowsInDays: 15 + dbEdition: "ENTERPRISE_EDITION_EXTREME_PERFORMANCE" + dbName: "db0130" + dbVersion: "21c" + dbWorkload: "OLTP" + diskRedundancy: "EXTERNAL" + displayName: "dbsystem0130" + hostName: "host0130" + initialDataStorageSizeInGB: 256 + licenseModel: "BRING_YOUR_OWN_LICENSE" + nodeCount: 2 + pdbName: "PDB0130" + privateIp: "10.0.1.99" + shape: "VM.Standard2.2" + sshPublicKeys: + - "oci-publickey" + storageManagement: "ASM" + subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + tags: + "TEST": "test_case_provision" + "CreatedBy": "MAA_TEAM" + tdeWalletPasswordSecret: "tde-password" + timeZone: "Europe/Berlin" +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl apply -f dbcs_service_with_2_node_rac.yaml +dbcssystem.database.oracle.com/dbcssystem-create configured + + + + +[root@docker-test-server test]# kubectl get ns + + + +NAME STATUS AGE +cert-manager Active 14d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d +oracle-database-operator-system Active 14d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 14d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 6 14d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 7 14d + 
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 14d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 14d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 14d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 14d +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. +2022-03-09T04:56:51.425Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T04:56:51.912Z INFO controller-runtime.manager.controller.dbcssystem DbcsSystem DBSystem provisioning {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T04:56:58.650Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T04:57:58.865Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T04:58:59.218Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T04:59:59.440Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:01:00.337Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:02:00.893Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:03:02.191Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:04:02.716Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:05:03.081Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:06:03.311Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:07:03.748Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:08:04.219Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:09:04.561Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:10:05.402Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:11:05.798Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:12:06.382Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:13:06.739Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:14:07.309Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:15:08.005Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:16:08.293Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:17:09.084Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:18:09.600Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:19:09.996Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:20:10.354Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:21:11.059Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:22:11.365Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:23:11.665Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:24:12.008Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:25:12.551Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:26:12.988Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:27:13.371Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:28:13.745Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:29:14.034Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:30:14.407Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:31:14.713Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:32:15.202Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:33:15.451Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:34:15.791Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:35:16.216Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:36:16.526Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:37:17.150Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:38:17.447Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:39:17.790Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:40:18.475Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:41:19.115Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:42:19.717Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:43:20.357Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:44:20.661Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:45:20.888Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:46:21.140Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:47:21.431Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:48:21.902Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:49:22.473Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:50:23.330Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:51:23.947Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:52:24.471Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:53:24.961Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:54:25.256Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:55:25.720Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:56:26.148Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:57:26.807Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:58:27.458Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T05:59:28.274Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:00:28.616Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:01:28.966Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:02:29.594Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:03:29.902Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:04:30.357Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:05:30.791Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:06:31.781Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:07:32.253Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:08:32.581Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:09:32.969Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:10:33.868Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T06:11:34.492Z INFO controller-runtime.manager.controller.dbcssystem DbcsSystem system provisioned succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} + + + + +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-create +Name: dbcssystem-create +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... 
+API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2022-03-09T04:54:51Z + Generation: 2 + Managed Fields: + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:dbSystem: + .: + f:availabilityDomain: + f:clusterName: + f:compartmentId: + f:dbAdminPaswordSecret: + f:dbBackupConfig: + .: + f:autoBackupEnabled: + f:autoBackupWindow: + f:recoveryWindowsInDays: + f:dbEdition: + f:dbName: + f:dbVersion: + f:dbWorkload: + f:diskRedundancy: + f:displayName: + f:hostName: + f:initialDataStorageSizeInGB: + f:licenseModel: + f:nodeCount: + f:pdbName: + f:privateIp: + f:shape: + f:sshPublicKeys: + f:storageManagement: + f:subnetId: + f:tags: + .: + f:CreatedBy: + f:TEST: + f:tdeWalletPasswordSecret: + f:timeZone: + f:ociConfigMap: + f:ociSecret: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-09T04:54:51Z + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:lastSuccessfulSpec: + f:spec: + f:id: + f:status: + .: + f:availabilityDomain: + f:cpuCoreCount: + f:dataStoragePercentage: + f:dataStorageSizeInGBs: + f:dbEdition: + f:dbInfo: + f:displayName: + f:id: + f:licenseModel: + f:network: + .: + f:clientSubnet: + f:domainName: + f:hostName: + f:listenerPort: + f:scanDnsName: + f:vcnName: + f:nodeCount: + f:recoStorageSizeInGB: + f:shape: + f:state: + f:storageManagement: + f:subnetId: + f:timeZone: + f:workRequests: + Manager: manager + Operation: Update + Time: 2022-03-09T06:11:37Z + Resource Version: 55318179 + UID: 69389564-7574-4150-b44c-3705ea358800 +Spec: + Db System: + Availability Domain: OLou:PHX-AD-1 + Cluster Name: maa-cluster + Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Db Admin Pasword Secret: admin-password + Db Backup Config: + Auto Backup Enabled: 
true + Auto Backup Window: SLOT_FOUR + Recovery Windows In Days: 15 + Db Edition: ENTERPRISE_EDITION_EXTREME_PERFORMANCE + Db Name: db0130 + Db Version: 21c + Db Workload: OLTP + Disk Redundancy: EXTERNAL + Display Name: dbsystem0130 + Host Name: host0130 + Initial Data Storage Size In GB: 256 + License Model: BRING_YOUR_OWN_LICENSE + Node Count: 2 + Pdb Name: PDB0130 + Private Ip: 10.0.1.99 + Shape: VM.Standard2.2 + Ssh Public Keys: + oci-publickey + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Tags: + Created By: MAA_TEAM + TEST: test_case_provision + Tde Wallet Password Secret: tde-password + Time Zone: Europe/Berlin + Oci Config Map: oci-cred + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:PHX-AD-1 + Cpu Core Count: 4 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Edition: ENTERPRISE_EDITION_EXTREME_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqanf2nzf4535im4dgfaliqtyeqa24gu3j5cg2u7676wo2q + Db Name: db0130 + Db Unique Name: db0130_phx1td + Db Workload: OLTP + Id: ocid1.database.oc1.phx.anyhqljrabf7htyalxkrhk636ibrjzbji7d4fnfm6xbhpizxybllfqzykaca + Display Name: dbsystem0130 + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyahapx2mkbpvilwvfhvisk3umch23s3nnz4spx3zthw55a + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: k8test-pubvcn + Domain Name: k8testpubvcn.k8test.oraclevcn.com + Host Name: host0130 + Listener Port: 1521 + Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com + Vcn Name: k8test + Node Count: 2 + Reco Storage Size In GB: 256 + Shape: VM.Standard2.2 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Time Zone: Europe/Berlin + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrapznsaptxjhk3o2ao5nzq7axpinpekj7lf36qmd6veh4ntg45hxa + Operation Type: Create DB System + 
Percent Complete: 100 + Time Accepted: 2022-03-09 04:56:56.007 +0000 UTC + Time Finished: 2022-03-09 06:08:59.539 +0000 UTC + Time Started: 2022-03-09 04:57:18.983 +0000 UTC +Events: + + + +[root@docker-test-server DBCS]# ssh -i id_rsa opc@129.146.35.79 +The authenticity of host '129.146.35.79 (129.146.35.79)' can't be established. +ECDSA key fingerprint is SHA256:KeuW7n18XXH8mFWnSvcMIeER7NpKyfG4njRpN9Xq/Mk. +ECDSA key fingerprint is MD5:64:e9:52:4f:18:14:fb:eb:ed:48:34:9d:15:80:04:5c. +Are you sure you want to continue connecting (yes/no)? yes +Warning: Permanently added '129.146.35.79' (ECDSA) to the list of known hosts. +[opc@host01301 ~]$ +[opc@host01301 ~]$ +[opc@host01301 ~]$ +[opc@host01301 ~]$ sudo su - grid +Last login: Wed Mar 9 18:23:10 CET 2022 + +[grid@host01301 ~]$ +[grid@host01301 ~]$ cemutlo -n +dbSys3zthw55a diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.md b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.md new file mode 100644 index 00000000..7bd9abea --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.md @@ -0,0 +1,57 @@ +# Create OBDS with All Parameters with Storage Management as ASM + +In this use case, an OCI OBDS system is deployed using the Oracle DB Operator OBDS controller, with all the available parameters in the .yaml file being used during the deployment. The type of the Storage Management in this case is ASM. + +**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment.
+ +This example uses `dbcs_service_with_all_parameters_asm.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Enable flag for Automatic Backup for OBDS Database as `True` +- Auto Backup Window for OBDS Database as `SLOT_FOUR` +- Recovery Windows for Backup retention in days as `15` +- Oracle Database Edition as `STANDARD_EDITION` +- Database Name as `db0130` +- Oracle Database Software Image Version as `19c` +- Database Workload Type as Transaction Processing i.e. `OLTP` +- Redundancy of the ASM Disks as `EXTERNAL` +- Display Name for the OBDS System as `dbsys123` +- Database Hostname Prefix as `host01234` +- Initial Size of the DATA Storage in GB as `256` +- License Model as `BRING_YOUR_OWN_LICENSE` +- Name of the PDB to be created as `PDB0123` +- Private IP explicitly assigned to be `10.0.1.99` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- Storage Management type as `ASM` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbv` +- Tag the OBDS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` +- TDE Wallet Secret as `tde-password` +- Time Zone for the OBDS System as `Europe/Berlin` + + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [dbcs_service_with_all_parameters_asm.yaml](./dbcs_service_with_all_parameters_asm.yaml) for this use case as below: + +1. 
Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_all_parameters_asm.yaml +dbcssystem.database.oracle.com/dbcssystem-create created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_with_all_parameters_asm_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with all parameters and with Storage Management as ASM. diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.yaml b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.yaml new file mode 100644 index 00000000..34811df7 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.yaml @@ -0,0 +1,37 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + clusterName: "maa-cluster" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbBackupConfig: + autoBackupEnabled: True + autoBackupWindow: "SLOT_FOUR" + recoveryWindowsInDays: 15 + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" + displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + diskRedundancy: "EXTERNAL" + hostName: "host01234" + initialDataStorageSizeInGB: 256 + pdbName: "PDB0123" + privateIp: "10.0.1.99" + shape: "VM.Standard2.2" + sshPublicKeys: + - "oci-publickey" + storageManagement: "ASM" + subnetId: 
"ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + tags: + "TEST": "test_case_provision" + "CreatedBy": "MAA_TEAM" + tdeWalletPasswordSecret: "tde-password" + timeZone: "Europe/Berlin" \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm_sample_output.log new file mode 100644 index 00000000..eec26016 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm_sample_output.log @@ -0,0 +1,242 @@ +[root@docker-test-server test]# cat dbcs_service_with_all_parameters_asm.yaml +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred-mumbai" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPaswordSecret: "admin-password" + dbBackupConfig: + autoBackupEnabled: True + autoBackupWindow: "SLOT_FOUR" + recoveryWindowsInDays: 15 + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" + displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + diskRedundancy: "EXTERNAL" + hostName: "host01234" + initialDataStorageSizeInGB: 256 + licenseModel: "BRING_YOUR_OWN_LICENSE" + pdbName: "PDB0123" + privateIp: "10.0.1.99" + shape: "VM.Standard.E5.Flex" + sshPublicKeys: + - "oci-publickey" + storageManagement: "ASM" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + tags: + "TEST": "test_case_provision" + "CreatedBy": "MAA_TEAM" + tdeWalletPasswordSecret: "tde-password" + timeZone: "Europe/Berlin" +[root@docker-test-server test]# +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl apply -f dbcs_service_with_all_parameters_asm.yaml 
+dbcssystem.database.oracle.com/dbcssystem-create created + + +[root@docker-test-server test]# kubectl get ns + +kubectl get allNAME STATUS AGE +cert-manager Active 13d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d +oracle-database-operator-system Active 13d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 13d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d +[root@docker-test-server test]# + + + + +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. 
+2022-03-09T02:59:43.691Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T02:59:44.410Z INFO controller-runtime.manager.controller.dbcssystem DbcsSystem DBSystem provisioning {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T02:59:52.341Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:00:52.845Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:01:53.382Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:02:53.737Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:03:54.188Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:04:54.545Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:05:55.030Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:06:55.429Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:07:55.789Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:08:56.188Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:09:56.905Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:10:57.308Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:11:58.068Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:12:58.444Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:13:58.840Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:14:59.194Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:15:59.772Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:17:00.249Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:18:00.599Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:19:00.881Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:20:01.121Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:21:01.488Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:22:01.874Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:23:02.726Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:24:03.634Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:25:03.978Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:26:04.450Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:27:04.763Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:28:05.246Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:29:05.825Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:30:06.398Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:31:07.256Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:32:07.551Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:33:08.057Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:34:08.452Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:35:08.772Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:36:09.216Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:37:09.584Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:38:09.881Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:39:10.602Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:40:10.869Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:41:11.301Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:42:12.468Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:43:12.732Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:44:13.243Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:45:13.582Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:46:13.873Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:47:14.440Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:48:14.941Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:49:15.381Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:50:16.038Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:51:16.335Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:52:16.785Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:53:17.374Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:54:17.675Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:55:18.054Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:56:18.623Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:57:19.033Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:58:19.611Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T03:59:20.320Z INFO controller-runtime.manager.controller.dbcssystem DbcsSystem system provisioned succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} + + + + +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-create +user/ $ k describe dbcssystems/dbcssystem-create +Name: dbcssystem-create +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-12-09T09:42:08Z + Generation: 2 + Resource Version: 117337682 + UID: cc31eb51-56bc-48f5-926b-2453710b1592 +Spec: + Db System: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Pasword Secret: admin-password + Db Backup Config: + Auto Backup Enabled: true + Auto Backup Window: SLOT_FOUR + Recovery Windows In Days: 15 + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: cdb12 + Db Version: 19c + Db Workload: OLTP + Disk Redundancy: EXTERNAL + Display Name: dbsys123 + Host Name: host01234 + Initial Data Storage Size In GB: 256 + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Pdb Name: PDB0123 + Private Ip: 10.0.1.99 + Shape: VM.Standard.E5.Flex + Ssh Public Keys: + oci-publickey + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Tags: + Created By: MAA_TEAM + TEST: test_case_provision + Tde Wallet Password Secret: tde-password + Time Zone: Europe/Berlin + Id: 
ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyap33onviyojoimevpazf4wtbnfsi5v5izah2s365wmyka + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiav4nm27oy6tfbqqyukvcgba7nalyozrgwfvkt5f25fazq + Db Name: cdb12 + Db Unique Name: cdb12_z4b_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyanuajzgrh6u5qtvlui4e7jtfwbcnx7lcplw36dy4u4fza + Display Name: dbsys123 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyap33onviyojoimevpazf4wtbnfsi5v5izah2s365wmyka + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host01234 + Listener Port: 1521 + Scan Dns Name: host01234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Created By: MAA_TEAM + TEST: test_case_provision + Pdb Name: PDB0123 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htya6wvrskrlk2fazy4pa25jcbinks7vsjdv4kxf5t6nxcxq + Reco Storage Size In GB: 256 + Shape: VM.Standard.E5.Flex + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrq63rk37tfqyu64lwason4rczllxmd5nk5iovdzbqkkk2d4nwp5ka + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-12-09 09:42:14.521 +0000 UTC + Time Finished: 2024-12-09 10:32:30.77 +0000 UTC + Time Started: 
2024-12-09 09:42:21.084 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.md b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.md new file mode 100644 index 00000000..1bc560f4 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.md @@ -0,0 +1,57 @@ +# Create OBDS with All Parameters with Storage Management as LVM + +In this use case, an OCI OBDS system is deployed using Oracle DB Operator OBDS controller using all the available parameters in the .yaml file being used during the deployment. + +**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `dbcs_service_with_all_parameters_lvm.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Enable flag for Automatic Backup for OBDS Database as `True` +- Auto Backup Window for OBDS Database as `SLOT_FOUR` +- Recovery Windows for Backup retention in days as `15` +- Oracle Database Edition as `STANDARD_EDITION` +- Database Name as `db0130` +- Oracle Database Software Image Version as `19c` +- Database Workload Type as Transaction Processing i.e. 
`OLTP` +- Redundancy of the ASM Disks as `EXTERNAL` +- Display Name for the OBDS System as `dbsys123` +- Database Hostname Prefix as `host01234` +- Initial Size of the DATA Storage in GB as `256` +- License Model as `BRING_YOUR_OWN_LICENSE` +- Name of the PDB to be created as `PDB0123` +- Private IP explicitly assigned to be `10.0.1.99` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- Storage Management type as `LVM` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbv` +- Tag the OBDS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` +- TDE Wallet Secret as `tde-password` +- Time Zone for the OBDS System as `Europe/Berlin` + + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [dbcs_service_with_all_parameters_lvm.yaml](./dbcs_service_with_all_parameters_lvm.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_all_parameters_lvm.yaml +dbcssystem.database.oracle.com/dbcssystem-create created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_with_all_parameters_lvm_sample_output.log) is the sample output for an OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with all parameters and with Storage Management as LVM. 
diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.yaml b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.yaml new file mode 100644 index 00000000..f76962d1 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.yaml @@ -0,0 +1,37 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbBackupConfig: + autoBackupEnabled: True + autoBackupWindow: "SLOT_FOUR" + recoveryWindowsInDays: 15 + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" + displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + diskRedundancy: "EXTERNAL" + hostName: "host01234" + initialDataStorageSizeInGB: 256 + licenseModel: "BRING_YOUR_OWN_LICENSE" + pdbName: "PDB0123" + privateIp: "10.0.1.99" + shape: "VM.Standard.E5.Flex" + sshPublicKeys: + - "oci-publickey" + storageManagement: "LVM" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + tags: + "TEST": "test_case_provision" + "CreatedBy": "MAA_TEAM" + tdeWalletPasswordSecret: "tde-password" + timeZone: "Europe/Berlin" diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm_sample_output.log new file mode 100644 index 00000000..f6946eff --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm_sample_output.log @@ -0,0 +1,266 @@ +[root@docker-test-server test]# cat dbcs_service_with_all_parameters_lvm.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred" + ociSecret: 
"oci-privatekey" + dbSystem: + availabilityDomain: "OLou:PHX-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" + dbAdminPaswordSecret: "admin-password" + dbBackupConfig: + autoBackupEnabled: True + autoBackupWindow: "SLOT_FOUR" + recoveryWindowsInDays: 15 + dbEdition: "STANDARD_EDITION" + dbName: "db0130" + dbVersion: "21c" + dbWorkload: "OLTP" + displayName: "dbsystem0130" + hostName: "host0130" + initialDataStorageSizeInGB: 256 + licenseModel: "BRING_YOUR_OWN_LICENSE" + pdbName: "PDB0130" + shape: "VM.Standard2.1" + sshPublicKeys: + - "oci-publickey" + storageManagement: "LVM" + subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + tags: + "TEST": "test_case_provision" + "CreatedBy": "MAA_TEAM" + tdeWalletPasswordSecret: "tde-password" + timeZone: "Europe/Berlin" +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl apply -f dbcs_service_with_all_parameters_lvm.yaml +dbcssystem.database.oracle.com/dbcssystem-create created +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl get ns + +kubectl get allNAME STATUS AGE +cert-manager Active 13d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d +oracle-database-operator-system Active 13d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 
13d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d +[root@docker-test-server test]# + + + + +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. +2022-03-09T01:28:57.125Z INFO controller-runtime.manager.controller.dbcssystem DbcsSystem DBSystem provisioning {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:29:04.321Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:30:04.972Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:31:05.417Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:32:05.728Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:33:06.284Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:34:06.763Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:35:07.237Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:36:07.594Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:37:08.416Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:38:08.724Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:39:08.998Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:40:09.408Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:41:10.348Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:42:10.845Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:43:11.152Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:44:11.560Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:45:11.927Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:46:12.217Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:47:12.442Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +2022-03-09T01:48:12.826Z INFO controller-runtime.manager.controller.dbcssystem DbcsSystem system provisioned succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-create", "namespace": "default"} +[root@docker-test-server test]# + + + + + + +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-create +Name: dbcssystem-create +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... +API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2022-03-09T01:28:56Z + Generation: 1 + Managed Fields: + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:dbSystem: + .: + f:availabilityDomain: + f:compartmentId: + f:dbAdminPaswordSecret: + f:dbBackupConfig: + .: + f:autoBackupEnabled: + f:autoBackupWindow: + f:recoveryWindowsInDays: + f:dbEdition: + f:dbName: + f:dbVersion: + f:dbWorkload: + f:displayName: + f:hostName: + f:initialDataStorageSizeInGB: + f:licenseModel: + f:pdbName: + f:shape: + f:sshPublicKeys: + f:storageManagement: + f:subnetId: + f:tags: + .: + f:CreatedBy: + f:TEST: + f:tdeWalletPasswordSecret: + f:timeZone: + f:ociConfigMap: + f:ociSecret: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-09T01:28:56Z + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:lastSuccessfulSpec: + f:spec: + f:id: + f:status: + .: + f:availabilityDomain: + f:cpuCoreCount: + f:dataStoragePercentage: + f:dataStorageSizeInGBs: + f:dbEdition: + f:dbInfo: + 
f:displayName: + f:id: + f:licenseModel: + f:network: + .: + f:clientSubnet: + f:domainName: + f:hostName: + f:listenerPort: + f:vcnName: + f:nodeCount: + f:recoStorageSizeInGB: + f:shape: + f:state: + f:storageManagement: + f:subnetId: + f:timeZone: + f:workRequests: + Manager: manager + Operation: Update + Time: 2022-03-09T01:48:12Z + Resource Version: 55235730 + UID: 53f67e5d-7725-4c8d-a3c2-53ac82f6ef11 +Spec: + Db System: + Availability Domain: OLou:PHX-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Db Admin Pasword Secret: admin-password + Db Backup Config: + Auto Backup Enabled: true + Auto Backup Window: SLOT_FOUR + Recovery Windows In Days: 15 + Db Edition: STANDARD_EDITION + Db Name: db0130 + Db Version: 21c + Db Workload: OLTP + Display Name: dbsystem0130 + Host Name: host0130 + Initial Data Storage Size In GB: 256 + License Model: BRING_YOUR_OWN_LICENSE + Pdb Name: PDB0130 + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + Storage Management: LVM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Tags: + Created By: MAA_TEAM + TEST: test_case_provision + Tde Wallet Password Secret: tde-password + Time Zone: Europe/Berlin + Oci Config Map: oci-cred + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:PHX-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Edition: STANDARD_EDITION + Db Info: + Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqahugk47wa6hp36fwamqh24lv7bavbqleyerdjgpoublgq + Db Name: db0130 + Db Unique Name: db0130_phx1t6 + Db Workload: OLTP + Id: ocid1.database.oc1.phx.anyhqljrabf7htya4wwpjsm6bc4jlipqrxl7lpgm5dt7rjpfcwnuynslifra + Display Name: dbsystem0130 + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htya3y2uepxpcpy4t2gv5ctnw3r2jkfaevxloydy5uilgpna + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: k8test-pubvcn + Domain Name: 
k8testpubvcn.k8test.oraclevcn.com + Host Name: host0130 + Listener Port: 1521 + Vcn Name: k8test + Node Count: 1 + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: LVM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Time Zone: Europe/Berlin + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrmfvsuabnnapzaxlpzxyipcfbqlquxd4yg7cfw57ectybunbjw4tq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2022-03-09 01:29:01.315 +0000 UTC + Time Finished: 2022-03-09 01:46:27.292 +0000 UTC + Time Started: 2022-03-09 01:29:13.294 +0000 UTC +Events: +[root@docker-test-server test]# + + diff --git a/docs/dbcs/provisioning/dbcs_service_with_kms.yaml b/docs/dbcs/provisioning/dbcs_service_with_kms.yaml new file mode 100644 index 00000000..691b17a1 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_kms.yaml @@ -0,0 +1,27 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "kmsdb" + displayName: "kmsdbsystem" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + hostName: "kmshost" + shape: "VM.Standard2.2" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + kmsConfig: + vaultName: "dbvault" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + keyName: "dbkey" \ No newline at end of file diff --git 
a/docs/dbcs/provisioning/dbcs_service_with_kms_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_kms_sample_output.log new file mode 100644 index 00000000..7ddf7d2f --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_kms_sample_output.log @@ -0,0 +1,91 @@ +kubectl describe dbcssystems.database.oracle.com/dbcssystem-create +Name: dbcssystem-create +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2025-01-09T18:10:30Z + Generation: 2 + Resource Version: 130640272 + UID: 85e39113-0a02-4cf6-84d8-2270c543b0bf +Spec: + Db System: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Password Secret: admin-password + Db Backup Config: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: kmsdb + Db Version: 19c + Db Workload: OLTP + Display Name: kmsdbsystem + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: kmshost + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard2.2 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyai52ll4ifn52jcwdwpvv2exqqfa2wptypvi46wibx5sea + Kms Config: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Name: dbkey + Vault Name: dbvault + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home 
Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiasjpqkykdchfksgas4k62cqsf6p5gkvubsj53fdokovnq + Db Name: kmsdb + Db Unique Name: kmsdb_7cb_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyarhrmiqzf4pari5sshhdglj6bpuijy3fupxvveblr2l6q + Display Name: kmsdbsystem + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyai52ll4ifn52jcwdwpvv2exqqfa2wptypvi46wibx5sea + Kms Details Status: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Id: ocid1.key.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljr63rcu5h6lmaeux752pzmp334zihovh3n2acags6zt37emab34yba + Key Name: dbkey + Management Endpoint: https://fbtxxaolaaavw-management.kms.ap-mumbai-1.oraclecloud.com + Vault Id: ocid1.vault.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljrbjokn2fwhh36tqzyog4yjrth3mj2emxea4fxmzw6z35zlmh65p2a + Vault Name: dbvault + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: kmshost + Listener Port: 1521 + Scan Dns Name: kmshost-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: kmsdb_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyaqbjqveoqzvn5dklbuc575xdrclsrkjt5juzzcelmuqla + Reco Storage Size In GB: 256 + Shape: VM.Standard2.2 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrcik5rtyygbv7qzzxqsmv6dvdwlfb7i2k3pitfqr2zomspcnkx7oa + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2025-01-09 18:10:41.171 +0000 UTC + Time Finished: 2025-01-09 19:31:17.126 +0000 UTC + Time Started: 2025-01-09 18:10:49.668 +0000 UTC +Events: \ 
No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.md b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.md new file mode 100644 index 00000000..0d75297b --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.md @@ -0,0 +1,44 @@ +# Deploy a DB System using OCI Oracle Base Database System (OBDS) with minimal parameters + +In this use case, an OCI Oracle Base Database System (OBDS) system is deployed using the Oracle DB Operator OBDS controller, with the minimal required parameters in the .yaml file being used during the deployment. + +**NOTE** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `dbcs_service_with_minimal_parameters.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Name as `cdb1` +- Display Name as `dbsystem1234` +- Oracle Database Software Image Version as `19c` +- Database Workload Type as Transaction Processing i.e. `OLTP` +- Database Hostname Prefix as `host1234` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- domain `subdda0b5eaa.cluster1.oraclevcn.com` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` + + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). 
+ +Use the file: [dbcs_service_with_minimal_parameters.yaml](./dbcs_service_with_minimal_parameters.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f dbcs_service_with_minimal_parameters.yaml +dbcssystem.database.oracle.com/dbcssystem-create created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_with_minimal_parameters_sample_output.log) is the sample output for an OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with minimal parameters. diff --git a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.yaml b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.yaml new file mode 100644 index 00000000..66e1c229 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.yaml @@ -0,0 +1,23 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb1" + displayName: "dbsystem1234" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + hostName: "host1234" + shape: "VM.Standard2.1" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git 
a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters_sample_output.log new file mode 100644 index 00000000..80860c51 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters_sample_output.log @@ -0,0 +1,82 @@ +kubectl describe dbcssystems/dbcssystem-create +Name: dbcssystem-create +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-12-10T05:19:46Z + Generation: 2 + Resource Version: 117717259 + UID: 3ff13686-50ec-41e3-81c8-77bb6b5a8afa +Spec: + Db System: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Pasword Secret: admin-password + Db Backup Config: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: cdb1 + Db Version: 19c + Db Workload: OLTP + Display Name: dbsystem1234 + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiaoqqlaxhx4urdwmefw4il5efzekneuru4bpfv57i7iy6a + Db Name: cdb1 + Db 
Unique Name: cdb1_tkf_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyalxin4xpiggjh4nxlta6o6iq56hjrlh4of2cq6c4qgrqa + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC +Events: diff --git a/docs/dbcs/provisioning/dbcs_service_with_pdb.yaml b/docs/dbcs/provisioning/dbcs_service_with_pdb.yaml new file mode 100644 index 00000000..7da5f729 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_pdb.yaml @@ -0,0 +1,38 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create-with-pdb + namespace: default +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:US-ASHBURN-AD-1" + compartmentId: 
"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "dbsys" + displayName: "dbsystem24" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "21c" + dbWorkload: "OLTP" + hostName: "host24" + shape: "VM.Standard3.Flex" + cpuCoreCount: 1 + domain: "subd215df3e6.k8stest.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua" + pdbConfigs: + - pdbName: "pdb_sauahuja_11" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "Finance" + - pdbName: "pdb_sauahuja_12" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "HR" \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_pdb_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_pdb_sample_output.log new file mode 100644 index 00000000..15946a43 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_pdb_sample_output.log @@ -0,0 +1,137 @@ +2024-08-14T13:59:34Z INFO DbcsSystem system provisioned succesfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T13:59:35Z INFO DBInst after assignment {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "dbcsInst:->": {"apiVersion": 
"database.oracle.com/v1alpha1", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-create-with-pdb"}} +2024-08-14T13:59:36Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq"]} +2024-08-14T13:59:36Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v1alpha1", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-create-with-pdb"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-14T13:59:36Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11"} +2024-08-14T13:59:36Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", 
"DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T13:59:36Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T13:59:36Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11"} +2024-08-14T13:59:37Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq"} +2024-08-14T13:59:37Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:00:07Z INFO Checking pluggable database status {"controller": 
"dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:00:38Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:01:08Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:01:38Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:02:08Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": 
"database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:02:38Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "AVAILABLE"} +2024-08-14T14:02:38Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq"} +2024-08-14T14:02:39Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq"]} +2024-08-14T14:02:39Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v1alpha1", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-create-with-pdb"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-14T14:02:39Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12"} +2024-08-14T14:02:39Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T14:02:39Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T14:02:39Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12"} +2024-08-14T14:02:40Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q"} +2024-08-14T14:02:40Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:03:11Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:03:41Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": 
"4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:04:11Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:04:41Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:05:11Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:05:42Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", 
"PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:06:12Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "AVAILABLE"} +2024-08-14T14:06:12Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q"} + + +#kubectl describe dbcssystems.database.oracle.com +Name: dbcssystem-create-with-pdb +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... 
+API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-08-16T09:26:08Z + Generation: 1 + Resource Version: 68483815 + UID: 9dd15628-e47b-4d9c-8bc6-2388e51cba30 +Spec: + Db System: + Availability Domain: OLou:US-ASHBURN-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Cpu Core Count: 1 + Db Admin Pasword Secret: admin-password + Db Backup Config: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: dbsys + Db Version: 21c + Db Workload: OLTP + Display Name: dbsystem24 + Domain: subd215df3e6.k8stest.oraclevcn.com + Host Name: host24 + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard3.Flex + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua + Kms Config: + Oci Config Map: oci-cred + Oci Secret: oci-privatekey + Pdb Configs: + Freeform Tags: + Department: Finance + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_11 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password + Freeform Tags: + Department: HR + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_12 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password +Status: + Availability Domain: OLou:US-ASHBURN-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.iad.anuwcljsqlb5nxiaqfh3twuegmxxci5boocmowxd6kcczeq6e7jwqezfmbwq + Db Name: dbsys + Db Unique Name: dbsys_dss_iad + Db Workload: OLTP + Id: ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga + Display Name: dbsystem24 + Id: ocid1.dbsystem.oc1.iad.anuwcljsabf7htya55wz5vfil7ul3pkzpubnymp6zrp3fhgomv3fcdr2vtiq + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: 
oke-nodesubnet-quick-k8s-test-ae2addeb0-regional + Domain Name: subd215df3e6.k8stest.oraclevcn.com + Host Name: host24 + Listener Port: 1521 + Scan Dns Name: host24-scan.subd215df3e6.k8stest.oraclevcn.com + Vcn Name: oke-vcn-quick-k8s-test-ae2addeb0 + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Department: Finance + Pdb Name: pdb_sauahuja_11 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4arzakcgum6mv7h6cqmxhepyrjzfs77mxhqt4f3gylxq + Should Pdb Admin Account Be Locked: false + Pdb Config Status: + Freeform Tags: + Department: HR + Pdb Name: pdb_sauahuja_12 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaiq6cyhxwqr4ad3pfn7g6e6nd2myiibj54tbg7vc27hfa + Should Pdb Admin Account Be Locked: false + Reco Storage Size In GB: 256 + Shape: VM.Standard3.Flex + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.iad.abuwcljscvomyvuthyc5bnmgi4myo565mbaghtjbhscgvabiy4tyzahjtiba + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-08-14 18:28:36.996 +0000 UTC + Time Finished: 2024-08-14 19:44:28.607 +0000 UTC + Time Started: 2024-08-14 18:28:45.134 +0000 UTC +Events: diff --git a/docs/dbcs/provisioning/delete_pdb.md b/docs/dbcs/provisioning/delete_pdb.md new file mode 100644 index 00000000..84d676bc --- /dev/null +++ b/docs/dbcs/provisioning/delete_pdb.md @@ -0,0 +1,50 @@ +# Delete PDB of an existing DBCS System + +In this use case, an existing OCI DBCS system deployed earlier is going to have PDB/PDBs deleted. It is a two-step operation. + +In order to delete PDBs of an existing DBCS system, the steps will be: + +1. Bind the existing DBCS System to DBCS Controller. +2. Apply the change to delete PDBs.
+ +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +As step 1, first bind the existing DBCS System to DBCS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below- +```bash +kubectl get dbcssystems +NAME AGE +dbcssystem-existing 3m33s +``` + +This example uses `deletepdb_in_existing_dbcs_system_list.yaml` to delete PDBs of a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- PDB Name to be deleted e.g `pdb_sauahuja_11` and `pdb_sauahuja_12` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [deletepdb_in_existing_dbcs_system_list.yaml](./deletepdb_in_existing_dbcs_system_list.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f deletepdb_in_existing_dbcs_system_list.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB deletion of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +```bash +[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +3. 
Remove DBCS Systems resource- +```bash +kubectl delete -f deletepdb_in_existing_dbcs_system_list.yaml +``` + +## Sample Output + +[Here](./deletepdb_in_existing_dbcs_system_list_sample_output.log) is the sample output for deletion of PDBs from an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. \ No newline at end of file diff --git a/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list.yaml b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list.yaml new file mode 100644 index 00000000..fed3ec6c --- /dev/null +++ b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list.yaml @@ -0,0 +1,13 @@ +kind: DbcsSystem +metadata: + name: dbcssystem-existing + namespace: default +spec: + id: "ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + pdbConfigs: + - pdbName: "pdb_sauahuja_11" + isDelete: true + - pdbName: "pdb_sauahuja_12" + isDelete: true \ No newline at end of file diff --git a/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list_sample_output.log b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list_sample_output.log new file mode 100644 index 00000000..a4f75fa5 --- /dev/null +++ b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list_sample_output.log @@ -0,0 +1,8 @@ +2024-07-01T12:34:44Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htyaxx3o46rynl5vyduxilwxeeafndy4cwqtkywkhcws435a"]} +2024-07-01T12:34:44Z INFO Deleting pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_11"} +2024-07-01T12:34:44Z INFO PluggableDatabaseId is not specified, getting pluggable databaseID {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808"} +2024-07-01T12:34:45Z INFO Successfully deleted pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_11"} +2024-07-01T12:34:46Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htyaxx3o46rynl5vyduxilwxeeafndy4cwqtkywkhcws435a"]} +2024-07-01T12:34:46Z INFO Deleting pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_12"} +2024-07-01T12:34:46Z INFO PluggableDatabaseId is not specified, getting pluggable databaseID {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808"} +2024-07-01T12:34:47Z INFO Successfully deleted pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_12"} \ No newline at end of file diff --git a/docs/dbcs/provisioning/known_issues.md b/docs/dbcs/provisioning/known_issues.md new file mode 100644 index 00000000..2dc54a48 --- /dev/null +++ b/docs/dbcs/provisioning/known_issues.md @@ -0,0 +1,12 @@ +# Known Issues - Oracle DB Operator DBCS Controller + +Below are the known issues when using the Oracle DB Operator DBCS Controller: + +1. There is a known issue related to the DB Version 19c, 12c and 11g when used with the Oracle DB Operator DBCS Controller. DB Version 21c and 18c work with the controller. +2. In order to scale up storage of an existing DBCS system, the steps will be: + * Bind the existing DBCS System to DBCS Controller. + * Apply the change to scale up its storage. + This sequence causes an issue. The actual step sequence that works is: + * Bind + * Apply Shape change + * Apply scale storage change diff --git a/docs/dbcs/provisioning/migrate_to_kms.md b/docs/dbcs/provisioning/migrate_to_kms.md new file mode 100644 index 00000000..0c5ee10c --- /dev/null +++ b/docs/dbcs/provisioning/migrate_to_kms.md @@ -0,0 +1,49 @@ +# Create and update KMS vault to an existing OBDS System already deployed in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier having encryption with TDE Wallet Password, will be migrated to have KMS Vault created and update OBDS System in OCI. It is a two-step operation.
 + +In order to create KMS Vaults to an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System (having encryption enabled with TDE Wallet password) to the OBDS Controller. +2. Apply the change to create KMS Vaults. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. It is also assumed that OBDS System you created earlier is using TDE Wallet password. + +As step 1, first bind the existing OBDS System to OBDS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below- +```bash +kubectl get dbcssystems +NAME AGE +dbcssystem-create 3m33s +``` +Before proceeding further, create the PDB Admin Password secret, which is going to be used as its name suggests. + +This example uses `dbcs_service_migrate_to_kms.yaml` to create KMS Vault to existing OBDS VMDB having encryption already enabled earlier with TDE Wallet using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyaoja4v2kx5rcfe5w2onndjfpqjhjoakxgwxo2sbgei5iq` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Existing `dbSystem` details (`compartmentId`,`dbAdminPasswordSecret`,`tdeWalletPasswordSecret`) used before to create OBDS system. +- kmsConfig - vaultName as `dbvault` as an example. +- kmsConfig - keyName as `dbkey` as an example. +- kmsConfig - compartmentId as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` as an example. +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [dbcs_service_migrate_to_kms.yaml](./dbcs_service_migrate_to_kms.yaml) for this use case as below: + +1. 
Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_migrate_to_kms.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB creation of KMS Vaults. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_migrate_to_kms.log) is the sample output for creation of KMS Vaults on an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/scale_down_dbcs_system_shape.md b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.md new file mode 100644 index 00000000..1f03ff9f --- /dev/null +++ b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.md @@ -0,0 +1,45 @@ +# Scale Down the shape of an existing OBDS System + +In this use case, an existing OCI OBDS system deployed earlier is scaled down for its shape using Oracle DB Operator OBDS controller. It is a two-step operation. + +In order to scale down an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System to OBDS Controller. +2. Apply the change to scale down its shape. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
 + +This example uses `scale_down_dbcs_system_shape.yaml` to scale down a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Hostname Prefix as `host1234` +- Oracle VMDB target Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [scale_down_dbcs_system_shape.yaml](./scale_down_dbcs_system_shape.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f scale_down_dbcs_system_shape.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB Scale down. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./scale_down_dbcs_system_shape_sample_output.log) is the sample output for scaling down the shape of an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/scale_down_dbcs_system_shape.yaml b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.yaml new file mode 100644 index 00000000..f4394ddc --- /dev/null +++ b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.yaml @@ -0,0 +1,18 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" + shape: "VM.Standard2.1" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git a/docs/dbcs/provisioning/scale_down_dbcs_system_shape_sample_output.log b/docs/dbcs/provisioning/scale_down_dbcs_system_shape_sample_output.log new file mode 100644 index 00000000..32e0a318 --- /dev/null +++ b/docs/dbcs/provisioning/scale_down_dbcs_system_shape_sample_output.log @@ -0,0 +1,153 @@ +[root@docker-test-server test]# kubectl apply -f scale_down_dbcs_system_shape.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured + +[root@docker-test-server test]# kubectl get ns + +kubectl get allNAME STATUS AGE +cert-manager Active 13d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d +oracle-database-operator-system Active 13d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 
Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 13d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. +2022-03-09T00:24:08.850Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:24:12.990Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:25:13.409Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:26:13.878Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:27:14.206Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:28:14.465Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:29:14.735Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:30:15.027Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:31:15.331Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:32:15.768Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:33:16.188Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:34:16.476Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:35:17.125Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:36:17.598Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:37:18.000Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T00:38:18.344Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} + + +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... 
+API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-12-10T10:54:17Z + Generation: 2 + Resource Version: 117775637 + UID: c9da1245-3582-4926-b311-c24d75e75003 +Spec: + Db System: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Pasword Secret: admin-password + Db Backup Config: + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Kms Config: + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiaoqqlaxhx4urdwmefw4il5efzekneuru4bpfv57i7iy6a + Db Name: cdb1 + Db Unique Name: cdb1_tkf_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyalxin4xpiggjh4nxlta6o6iq56hjrlh4of2cq6c4qgrqa + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: 
ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrtpnjzjidageolva6ytlzjfb2lqhbbrivm4lsb67xyjzyyke6bt4a + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:53.547 +0000 UTC + Time Finished: 2024-12-10 09:14:04.572 +0000 UTC + Time Started: 2024-12-10 08:57:57.588 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxg7gov22vlcbqbnxrkl7t7xkcfya6w6gvck344jdf5vtqgw5wzgq + Operation Type: Update DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:43.701 +0000 UTC + Time Finished: 2024-12-10 09:14:22.705 +0000 UTC + Time Started: 2024-12-10 08:57:53.873 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr4qmf6rdtcbrc5p2q7bev3igugtpgfbwc2laht22yyjzr2srrg7vq + Operation Type: Update DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 10:57:27.313 +0000 UTC + Time Finished: 2024-12-10 11:15:50.597 +0000 UTC + Time Started: 2024-12-10 10:57:45.242 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr2vehqv3vgrxr5mrmd6hoqxg2zr6m5eaunv3ip6bcrubcpvhudmia + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 10:57:44.95 +0000 UTC + Time Finished: 2024-12-10 11:15:40.364 +0000 UTC + Time Started: 2024-12-10 10:57:54.082 +0000 UTC 
+Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/scale_up_dbcs_system_shape.md b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.md new file mode 100644 index 00000000..924a8517 --- /dev/null +++ b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.md @@ -0,0 +1,45 @@ +# Scale UP the shape of an existing OBDS System + +In this use case, an existing OCI OBDS system deployed earlier is scaled up for its shape using Oracle DB Operator OBDS controller. It is a two-step operation. + +In order to scale up an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System to OBDS Controller. +2. Apply the change to scale up its shape. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `scale_up_dbcs_system_shape.yaml` to scale up a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Hostname Prefix as `host1234` +- Oracle VMDB target Shape as `VM.Standard2.2` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). 
+ +Use the file: [scale_up_dbcs_system_shape.yaml](./scale_up_dbcs_system_shape.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f scale_up_dbcs_system_shape.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB Scale up. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./scale_up_dbcs_system_shape_sample_output.log) is the sample output for scaling up the shape of an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/scale_up_dbcs_system_shape.yaml b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.yaml new file mode 100644 index 00000000..0be84c53 --- /dev/null +++ b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.yaml @@ -0,0 +1,18 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" + shape: "VM.Standard2.2" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git a/docs/dbcs/provisioning/scale_up_dbcs_system_shape_sample_output.log b/docs/dbcs/provisioning/scale_up_dbcs_system_shape_sample_output.log new 
file mode 100644 index 00000000..96b52924 --- /dev/null +++ b/docs/dbcs/provisioning/scale_up_dbcs_system_shape_sample_output.log @@ -0,0 +1,351 @@ +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... +API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2022-03-08T23:27:48Z + Generation: 1 + Managed Fields: + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:id: + f:ociConfigMap: + f:ociSecret: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-08T23:27:48Z + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:lastSuccessfulSpec: + f:spec: + f:dbSystem: + .: + f:availabilityDomain: + f:compartmentId: + f:cpuCoreCount: + f:dbAdminPaswordSecret: + f:dbBackupConfig: + f:dbEdition: + f:dbName: + f:dbUniqueName: + f:dbVersion: + f:diskRedundancy: + f:displayName: + f:faultDomains: + f:hostName: + f:nodeCount: + f:shape: + f:sshPublicKeys: + f:subnetId: + f:status: + .: + f:availabilityDomain: + f:cpuCoreCount: + f:dataStoragePercentage: + f:dataStorageSizeInGBs: + f:dbEdition: + f:dbInfo: + f:displayName: + f:id: + f:licenseModel: + f:network: + .: + f:clientSubnet: + f:domainName: + f:hostName: + f:listenerPort: + f:scanDnsName: + f:vcnName: + f:nodeCount: + f:recoStorageSizeInGB: + f:shape: + f:state: + f:storageManagement: + f:subnetId: + f:timeZone: + f:workRequests: + Manager: manager +apiVersion: database.oracle.com/v1alpha1 + Operation: Update + Time: 2022-03-08T23:27:52Z + Resource Version: 55191827 + UID: 
96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 +Spec: + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + Oci Config Map: oci-cred + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:PHX-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Edition: ENTERPRISE_EDITION + Db Info: + Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq + Db Name: db0130 + Db Unique Name: db0130_phx1zn + Db Workload: OLTP + Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra + Display Name: dbsystem20220308221302 + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + License Model: LICENSE_INCLUDED + Network: + Client Subnet: k8test-pubvcn + Domain Name: k8testpubvcn.k8test.oraclevcn.com + Host Name: host0130 + Listener Port: 1521 + Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com + Vcn Name: k8test + Node Count: 1 + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC + Time Finished: 2022-03-08 23:11:50.46 +0000 UTC + Time Started: 2022-03-08 22:13:16.995 +0000 UTC +Events: +[root@docker-test-server test]# + + + +[root@docker-test-server test]# cat scale_up_dbcs_system_shape.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:PHX-AD-1" + 
compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" + dbAdminPaswordSecret: "admin-password" + hostName: "host0130" + shape: "VM.Standard2.2" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl apply -f scale_up_dbcs_system_shape.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl get ns + +kubectl get allNAME STATUS AGE +cert-manager Active 13d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d +oracle-database-operator-system Active 13d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 13d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d +[root@docker-test-server test]# + +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. 
+2022-03-08T23:32:12.728Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:32:50.935Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:32:55.703Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:33:55.990Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:34:56.830Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:35:57.120Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:36:57.675Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:37:58.011Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:38:58.566Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:39:58.929Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:40:59.368Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:41:59.837Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:43:00.298Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:44:00.581Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:45:00.942Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-08T23:46:01.332Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} + + + +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... 
+API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2022-03-08T23:27:48Z + Generation: 2 + Managed Fields: + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:dbSystem: + .: + f:availabilityDomain: + f:compartmentId: + f:dbAdminPaswordSecret: + f:hostName: + f:shape: + f:sshPublicKeys: + f:subnetId: + f:id: + f:ociConfigMap: + f:ociSecret: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-08T23:32:50Z + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:lastSuccessfulSpec: + f:spec: + f:dbSystem: + f:cpuCoreCount: + f:dbBackupConfig: + f:dbEdition: + f:dbName: + f:dbUniqueName: + f:dbVersion: + f:diskRedundancy: + f:displayName: + f:faultDomains: + f:nodeCount: + f:status: + .: + f:availabilityDomain: + f:cpuCoreCount: + f:dataStoragePercentage: + f:dataStorageSizeInGBs: + f:dbEdition: + f:dbInfo: + f:displayName: + f:id: + f:licenseModel: + f:network: + .: + f:clientSubnet: + f:domainName: + f:hostName: + f:listenerPort: + f:scanDnsName: + f:vcnName: + f:nodeCount: + f:recoStorageSizeInGB: + f:shape: + f:state: + f:storageManagement: + f:subnetId: + f:timeZone: + f:workRequests: + Manager: manager + Operation: Update + Time: 2022-03-08T23:32:55Z + Resource Version: 55197836 + UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 +Spec: + Db System: + Availability Domain: OLou:PHX-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Db Admin Pasword Secret: admin-password + Host Name: host0130 + Shape: VM.Standard2.2 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + Oci Config Map: oci-cred + 
Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:PHX-AD-1 + Cpu Core Count: 2 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Edition: ENTERPRISE_EDITION + Db Info: + Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq + Db Name: db0130 + Db Unique Name: db0130_phx1zn + Db Workload: OLTP + Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra + Display Name: dbsystem20220308221302 + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + License Model: LICENSE_INCLUDED + Network: + Client Subnet: k8test-pubvcn + Domain Name: k8testpubvcn.k8test.oraclevcn.com + Host Name: host0130 + Listener Port: 1521 + Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com + Vcn Name: k8test + Node Count: 1 + Reco Storage Size In GB: 256 + Shape: VM.Standard2.2 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC + Time Finished: 2022-03-08 23:11:50.46 +0000 UTC + Time Started: 2022-03-08 22:13:16.995 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC + Time Finished: 2022-03-08 23:46:21.126 +0000 UTC + Time Started: 2022-03-08 23:33:52.109 +0000 UTC +Events: +[root@docker-test-server test]# + diff --git a/docs/dbcs/provisioning/scale_up_storage.md b/docs/dbcs/provisioning/scale_up_storage.md new file mode 100644 index 00000000..ff16cbf9 --- /dev/null +++ b/docs/dbcs/provisioning/scale_up_storage.md @@ -0,0 +1,45 
@@ +# Scale UP the storage of an existing OBDS System + +In this use case, an existing OCI OBDS system deployed earlier is scaled up for its storage using Oracle DB Operator OBDS controller. It's a two-step operation. + +In order to scale up storage of an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System to OBDS Controller. +2. Apply the change to scale up its storage. + +**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `scale_up_storage.yaml` to scale up storage of an existing Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Hostname Prefix as `host1234` +- Target Data Storage Size in GBs as `512` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` + + +Use the file: [scale_up_storage.yaml](./scale_up_storage.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@test-server OBDS]# kubectl apply -f scale_up_storage.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB Scale up.
+ +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./scale_up_storage_sample_output.log) is the sample output for scaling up the storage of an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with minimal parameters. diff --git a/docs/dbcs/provisioning/scale_up_storage.yaml b/docs/dbcs/provisioning/scale_up_storage.yaml new file mode 100644 index 00000000..a2977157 --- /dev/null +++ b/docs/dbcs/provisioning/scale_up_storage.yaml @@ -0,0 +1,19 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" + shape: "VM.Standard2.1" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + initialDataStorageSizeInGB: 512 + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" \ No newline at end of file diff --git a/docs/dbcs/provisioning/scale_up_storage_sample_output.log b/docs/dbcs/provisioning/scale_up_storage_sample_output.log new file mode 100644 index 00000000..e703391e --- /dev/null +++ b/docs/dbcs/provisioning/scale_up_storage_sample_output.log @@ -0,0 +1,113 @@ +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + 
{"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-12-10T10:54:17Z + Generation: 3 + Resource Version: 117788129 + UID: c9da1245-3582-4926-b311-c24d75e75003 +Spec: + Db System: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Pasword Secret: admin-password + Db Backup Config: + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Initial Data Storage Size In GB: 512 + Kms Config: + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 512 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiaoqqlaxhx4urdwmefw4il5efzekneuru4bpfv57i7iy6a + Db Name: cdb1 + Db Unique Name: cdb1_tkf_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyalxin4xpiggjh4nxlta6o6iq56hjrlh4of2cq6c4qgrqa + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: 
oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrtpnjzjidageolva6ytlzjfb2lqhbbrivm4lsb67xyjzyyke6bt4a + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:53.547 +0000 UTC + Time Finished: 2024-12-10 09:14:04.572 +0000 UTC + Time Started: 2024-12-10 08:57:57.588 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxg7gov22vlcbqbnxrkl7t7xkcfya6w6gvck344jdf5vtqgw5wzgq + Operation Type: Update DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:43.701 +0000 UTC + Time Finished: 2024-12-10 09:14:22.705 +0000 UTC + Time Started: 2024-12-10 08:57:53.873 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr4qmf6rdtcbrc5p2q7bev3igugtpgfbwc2laht22yyjzr2srrg7vq + Operation Type: Update DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 10:57:27.313 +0000 UTC + Time Finished: 2024-12-10 11:15:50.597 +0000 UTC + Time Started: 2024-12-10 10:57:45.242 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr2vehqv3vgrxr5mrmd6hoqxg2zr6m5eaunv3ip6bcrubcpvhudmia + Operation Type: Update Shape + Percent 
Complete: 100 + Time Accepted: 2024-12-10 10:57:44.95 +0000 UTC + Time Finished: 2024-12-10 11:15:40.364 +0000 UTC + Time Started: 2024-12-10 10:57:54.082 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr36bt7ot5oq3otch4bu2axn3azkicot4zuwgwmxeupxr4siisydja + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2024-12-10 11:44:49.369 +0000 UTC + Time Finished: 2024-12-10 11:58:45.01 +0000 UTC + Time Started: 2024-12-10 11:44:55.544 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxdpmmaipuqke5yx3szyfnf2zwkfptz3jevlq3coicecfjihnm4kq + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2024-12-10 11:44:55.255 +0000 UTC + Time Finished: 2024-12-10 11:58:25.229 +0000 UTC + Time Started: 2024-12-10 11:44:57.743 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/terminate_dbcs_system.md b/docs/dbcs/provisioning/terminate_dbcs_system.md new file mode 100644 index 00000000..f3b19cbc --- /dev/null +++ b/docs/dbcs/provisioning/terminate_dbcs_system.md @@ -0,0 +1,46 @@ +# Terminate an existing Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier is terminated using Oracle DB Operator OBDS controller. It's a two-step operation. + +In order to terminate an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System to OBDS Controller. +2. Apply the change to terminate this OBDS System. + +**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment.
+ +This example uses `terminate_dbcs_system.yaml` to terminate a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [terminate_dbcs_system.yaml](./terminate_dbcs_system.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@test-server OBDS]# kubectl apply -f terminate_dbcs_system.yaml +dbcssystem.database.oracle.com/dbcssystem-terminate created + + +[root@test-server OBDS]# kubectl delete -f terminate_dbcs_system.yaml +dbcssystem.database.oracle.com "dbcssystem-terminate" deleted +``` + +2. Check the logs of Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for an update on the terminate operation being accepted. + +``` +[root@test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +3. Check and confirm that the existing OCI OBDS system is no longer available after some time because of termination: + +``` +[root@test-server OBDS]# kubectl describe dbcssystems.database.oracle.com dbcssystem-terminate +``` + +## Sample Output + +[Here](./terminate_dbcs_system_sample_output.log) is the sample output for terminating an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with minimal parameters.
diff --git a/docs/dbcs/provisioning/terminate_dbcs_system.yaml b/docs/dbcs/provisioning/terminate_dbcs_system.yaml new file mode 100644 index 00000000..a4a2f105 --- /dev/null +++ b/docs/dbcs/provisioning/terminate_dbcs_system.yaml @@ -0,0 +1,9 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-terminate +spec: + hardLink: True + id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" diff --git a/docs/dbcs/provisioning/terminate_dbcs_system_sample_output.log b/docs/dbcs/provisioning/terminate_dbcs_system_sample_output.log new file mode 100644 index 00000000..383f823c --- /dev/null +++ b/docs/dbcs/provisioning/terminate_dbcs_system_sample_output.log @@ -0,0 +1,248 @@ +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... 
+API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2022-03-08T23:27:48Z + Generation: 5 + Managed Fields: + API Version: database.oracle.com/v4 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:lastSuccessfulSpec: + f:spec: + f:dbSystem: + f:cpuCoreCount: + f:dbBackupConfig: + f:dbEdition: + f:dbName: + f:dbUniqueName: + f:dbVersion: + f:diskRedundancy: + f:displayName: + f:faultDomains: + f:nodeCount: + f:status: + .: + f:availabilityDomain: + f:cpuCoreCount: + f:dataStoragePercentage: + f:dataStorageSizeInGBs: + f:dbEdition: + f:dbInfo: + f:displayName: + f:id: + f:licenseModel: + f:network: + .: + f:clientSubnet: + f:domainName: + f:hostName: + f:listenerPort: + f:scanDnsName: + f:vcnName: + f:nodeCount: + f:recoStorageSizeInGB: + f:shape: + f:state: + f:storageManagement: + f:subnetId: + f:timeZone: + f:workRequests: + Manager: manager + Operation: Update + Time: 2022-03-08T23:32:55Z + API Version: database.oracle.com/v4 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:dbSystem: + .: + f:availabilityDomain: + f:compartmentId: + f:dbAdminPaswordSecret: + f:hostName: + f:licenseModel: + f:shape: + f:sshPublicKeys: + f:subnetId: + f:id: + f:ociConfigMap: + f:ociSecret: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-09T01:15:19Z + Resource Version: 55226409 + UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 +Spec: + Db System: + Availability Domain: OLou:PHX-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Db Admin Pasword Secret: admin-password + Host Name: host0130 + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Id: 
ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + Oci Config Map: oci-cred + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:PHX-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 512 + Db Edition: ENTERPRISE_EDITION + Db Info: + Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq + Db Name: db0130 + Db Unique Name: db0130_phx1zn + Db Workload: OLTP + Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra + Display Name: dbsystem20220308221302 + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: k8test-pubvcn + Domain Name: k8testpubvcn.k8test.oraclevcn.com + Host Name: host0130 + Listener Port: 1521 + Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com + Vcn Name: k8test + Node Count: 1 + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC + Time Finished: 2022-03-08 23:11:50.46 +0000 UTC + Time Started: 2022-03-08 22:13:16.995 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC + Time Finished: 2022-03-08 23:46:21.126 +0000 UTC + Time Started: 2022-03-08 23:33:52.109 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljr5sveun3f6k3zuz23py7mm7jncmpq5vwyajbo5ezhc765347defwq + Operation Type: Update Shape + 
Percent Complete: 100 + Time Accepted: 2022-03-09 00:25:03.644 +0000 UTC + Time Finished: 2022-03-09 00:38:59.526 +0000 UTC + Time Started: 2022-03-09 00:25:15.578 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrbaqah6qktukvdlnx66fp2hlevegryfuppsshkqemfcdjtwfwaq3q + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2022-03-09 00:48:54.849 +0000 UTC + Time Finished: 2022-03-09 01:03:10.885 +0000 UTC + Time Started: 2022-03-09 00:49:05.911 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrvhcpme5ijlsxup22kuumjuzn367vdxwhblv2nxpwshfwnig5au7a + Operation Type: Update DB System License Type + Percent Complete: 100 + Time Accepted: 2022-03-09 01:16:16.991 +0000 UTC + Time Finished: 2022-03-09 01:17:05.025 +0000 UTC + Time Started: 2022-03-09 01:16:24.716 +0000 UTC +Events: +[root@docker-test-server test]# + + +[root@docker-test-server test]# cat terminate_dbcs_system.yaml +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-terminate +spec: + hardLink: True + id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl apply -f terminate_dbcs_system.yaml +dbcssystem.database.oracle.com/dbcssystem-terminate created +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl get ns + +kubectl get allNAME STATUS AGE +cert-manager Active 13d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d +oracle-database-operator-system Active 13d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 
3 13d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 13d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. +2022-03-09T01:24:18.773Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-terminate", "namespace": "default"} +2022-03-09T01:24:18.793Z INFO controller-runtime.manager.controller.dbcssystem Finalizer registered successfully. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-terminate", "namespace": "default"} +2022-03-09T01:24:22.461Z INFO controller-runtime.manager.controller.dbcssystem Sync information from remote DbcsSystem System successfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-terminate", "namespace": "default"} + + + + + + +[root@docker-test-server test]# kubectl delete -f terminate_dbcs_system.yaml +dbcssystem.database.oracle.com "dbcssystem-terminate" deleted +[root@docker-test-server test]# + + + +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. 
+2022-03-09T01:25:05.199Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-terminate", "namespace": "default"} +2022-03-09T01:25:05.199Z INFO controller-runtime.manager.controller.dbcssystem Terminate DbcsSystem Database: {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-terminate", "namespace": "default"} +2022-03-09T01:25:06.920Z INFO controller-runtime.manager.controller.dbcssystem Finalizer unregistered successfully. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-terminate", "namespace": "default"} + + +[root@docker-test-server test]# kubectl delete dbcssystems.database.oracle.com dbcssystem-existing +dbcssystem.database.oracle.com "dbcssystem-existing" deleted +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Error from server (NotFound): dbcssystems.database.oracle.com "dbcssystem-existing" not found +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl delete dbcssystems.database.oracle.com dbcssystem-create +dbcssystem.database.oracle.com "dbcssystem-create" deleted +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl delete dbcssystems.database.oracle.com dbcssystem-create +Error from server (NotFound): dbcssystems.database.oracle.com "dbcssystem-create" not found +[root@docker-test-server test]# diff --git a/docs/dbcs/provisioning/update_license.md b/docs/dbcs/provisioning/update_license.md new file mode 100644 index 00000000..6f32c31b --- /dev/null +++ b/docs/dbcs/provisioning/update_license.md @@ -0,0 +1,46 @@ +# Update License type of an existing OBDS System + +In this use case, the license type of an existing OCI OBDS system deployed earlier is changed from `License Included` to `Bring your own 
license` using Oracle DB Operator OBDS controller. Its a 2 Step operation. + +In order to update the license type an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System to OBDS Controller. +2. Apply the change to change its license type. + +**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `update_license.yaml` to change the license type of a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Target license model as `BRING_YOUR_OWN_LICENSE` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Hostname Prefix as `host1234` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [update_license.yaml](./update_license.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@test-server OBDS]# kubectl apply -f update_license.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB Scale up. 
+ +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./update_license_sample_output.log) is the sample output for updating the license type an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/update_license.yaml b/docs/dbcs/provisioning/update_license.yaml new file mode 100644 index 00000000..7c192b6b --- /dev/null +++ b/docs/dbcs/provisioning/update_license.yaml @@ -0,0 +1,20 @@ + apiVersion: database.oracle.com/v4 + kind: DbcsSystem + metadata: + name: dbcssystem-existing + spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" + licenseModel: "BRING_YOUR_OWN_LICENSE" + shape: "VM.Standard2.1" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + initialDataStorageSizeInGB: 512 + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git a/docs/dbcs/provisioning/update_license_sample_output.log b/docs/dbcs/provisioning/update_license_sample_output.log new file mode 100644 index 00000000..7bed4383 --- /dev/null +++ b/docs/dbcs/provisioning/update_license_sample_output.log @@ -0,0 +1,388 @@ +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + 
{"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... +API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2022-03-08T23:27:48Z + Generation: 4 + Managed Fields: + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:lastSuccessfulSpec: + f:spec: + f:dbSystem: + f:cpuCoreCount: + f:dbBackupConfig: + f:dbEdition: + f:dbName: + f:dbUniqueName: + f:dbVersion: + f:diskRedundancy: + f:displayName: + f:faultDomains: + f:nodeCount: + f:status: + .: + f:availabilityDomain: + f:cpuCoreCount: + f:dataStoragePercentage: + f:dataStorageSizeInGBs: + f:dbEdition: + f:dbInfo: + f:displayName: + f:id: + f:licenseModel: + f:network: + .: + f:clientSubnet: + f:domainName: + f:hostName: + f:listenerPort: + f:scanDnsName: + f:vcnName: + f:nodeCount: + f:recoStorageSizeInGB: + f:shape: + f:state: + f:storageManagement: + f:subnetId: + f:timeZone: + f:workRequests: + Manager: manager + Operation: Update + Time: 2022-03-08T23:32:55Z + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:dbSystem: + .: + f:availabilityDomain: + f:compartmentId: + f:dbAdminPaswordSecret: + f:hostName: + f:initialDataStorageSizeInGB: + f:shape: + f:sshPublicKeys: + f:subnetId: + f:id: + f:ociConfigMap: + f:ociSecret: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-09T00:48:11Z + Resource Version: 55222013 + UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 +Spec: + Db System: + Availability Domain: OLou:PHX-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Db Admin Pasword Secret: admin-password + Host Name: host0130 + Initial Data Storage Size In GB: 512 + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + 
Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + Oci Config Map: oci-cred + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:PHX-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 512 + Db Edition: ENTERPRISE_EDITION + Db Info: + Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq + Db Name: db0130 + Db Unique Name: db0130_phx1zn + Db Workload: OLTP + Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra + Display Name: dbsystem20220308221302 + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + License Model: LICENSE_INCLUDED + Network: + Client Subnet: k8test-pubvcn + Domain Name: k8testpubvcn.k8test.oraclevcn.com + Host Name: host0130 + Listener Port: 1521 + Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com + Vcn Name: k8test + Node Count: 1 + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC + Time Finished: 2022-03-08 23:11:50.46 +0000 UTC + Time Started: 2022-03-08 22:13:16.995 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC + Time Finished: 2022-03-08 23:46:21.126 +0000 UTC + Time Started: 2022-03-08 23:33:52.109 +0000 UTC + Operation Id: 
ocid1.coreservicesworkrequest.oc1.phx.abyhqljr5sveun3f6k3zuz23py7mm7jncmpq5vwyajbo5ezhc765347defwq + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2022-03-09 00:25:03.644 +0000 UTC + Time Finished: 2022-03-09 00:38:59.526 +0000 UTC + Time Started: 2022-03-09 00:25:15.578 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrbaqah6qktukvdlnx66fp2hlevegryfuppsshkqemfcdjtwfwaq3q + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2022-03-09 00:48:54.849 +0000 UTC + Time Finished: 2022-03-09 01:03:10.885 +0000 UTC + Time Started: 2022-03-09 00:49:05.911 +0000 UTC +Events: +[root@docker-test-server test]# + + + +[root@docker-test-server test]# cat update_license.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + dbSystem: + availabilityDomain: "OLou:PHX-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" + dbAdminPaswordSecret: "admin-password" + hostName: "host0130" + licenseModel: "BRING_YOUR_OWN_LICENSE" + shape: "VM.Standard2.1" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl apply -f update_license.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl get ns + +kubectl get allNAME STATUS AGE +cert-manager Active 13d +default Active 139d +kube-node-lease Active 139d +kube-public Active 139d +kube-system Active 139d +oracle-database-operator-system Active 13d +shns Active 88d +[root@docker-test-server test]# +[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system +NAME 
READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 3 13d +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 13d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d +[root@docker-test-server test]# + + +[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system +. +. +2022-03-09T01:15:19.090Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T01:15:23.534Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T01:16:23.931Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} +2022-03-09T01:17:24.701Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} + + + + +[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... +API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2022-03-08T23:27:48Z + Generation: 5 + Managed Fields: + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + f:lastSuccessfulSpec: + f:spec: + f:dbSystem: + f:cpuCoreCount: + f:dbBackupConfig: + f:dbEdition: + f:dbName: + f:dbUniqueName: + f:dbVersion: + f:diskRedundancy: + f:displayName: + f:faultDomains: + f:nodeCount: + f:status: + .: + f:availabilityDomain: + f:cpuCoreCount: + f:dataStoragePercentage: + f:dataStorageSizeInGBs: + f:dbEdition: + f:dbInfo: + f:displayName: + f:id: + f:licenseModel: + f:network: + .: + f:clientSubnet: + f:domainName: + f:hostName: + f:listenerPort: + f:scanDnsName: + f:vcnName: + f:nodeCount: + f:recoStorageSizeInGB: + f:shape: + f:state: + f:storageManagement: + f:subnetId: + f:timeZone: + f:workRequests: + Manager: manager + Operation: Update + Time: 2022-03-08T23:32:55Z + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:dbSystem: + .: + f:availabilityDomain: + f:compartmentId: + f:dbAdminPaswordSecret: + f:hostName: + f:licenseModel: + f:shape: + f:sshPublicKeys: + f:subnetId: + f:id: + f:ociConfigMap: + f:ociSecret: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-03-09T01:15:19Z + Resource Version: 55226409 + UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 
+Spec: + Db System: + Availability Domain: OLou:PHX-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Db Admin Pasword Secret: admin-password + Host Name: host0130 + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + Oci Config Map: oci-cred + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:PHX-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 512 + Db Edition: ENTERPRISE_EDITION + Db Info: + Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq + Db Name: db0130 + Db Unique Name: db0130_phx1zn + Db Workload: OLTP + Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra + Display Name: dbsystem20220308221302 + Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: k8test-pubvcn + Domain Name: k8testpubvcn.k8test.oraclevcn.com + Host Name: host0130 + Listener Port: 1521 + Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com + Vcn Name: k8test + Node Count: 1 + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC + Time Finished: 2022-03-08 23:11:50.46 +0000 UTC + Time Started: 2022-03-08 22:13:16.995 +0000 UTC + Operation Id: 
ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC + Time Finished: 2022-03-08 23:46:21.126 +0000 UTC + Time Started: 2022-03-08 23:33:52.109 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljr5sveun3f6k3zuz23py7mm7jncmpq5vwyajbo5ezhc765347defwq + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2022-03-09 00:25:03.644 +0000 UTC + Time Finished: 2022-03-09 00:38:59.526 +0000 UTC + Time Started: 2022-03-09 00:25:15.578 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrbaqah6qktukvdlnx66fp2hlevegryfuppsshkqemfcdjtwfwaq3q + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2022-03-09 00:48:54.849 +0000 UTC + Time Finished: 2022-03-09 01:03:10.885 +0000 UTC + Time Started: 2022-03-09 00:49:05.911 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrvhcpme5ijlsxup22kuumjuzn367vdxwhblv2nxpwshfwnig5au7a + Operation Type: Update DB System License Type + Percent Complete: 100 + Time Accepted: 2022-03-09 01:16:16.991 +0000 UTC + Time Finished: 2022-03-09 01:17:05.025 +0000 UTC + Time Started: 2022-03-09 01:16:24.716 +0000 UTC +Events: +[root@docker-test-server test]# \ No newline at end of file diff --git a/docs/dbcs/usecase01/README.md b/docs/dbcs/usecase01/README.md new file mode 100644 index 00000000..4349e211 --- /dev/null +++ b/docs/dbcs/usecase01/README.md @@ -0,0 +1,199 @@ + +# Makefile for the dbcs automation creation + +This [makefile](#makefile) helps to speed up the **DBCS** creation. Edit all the credentials related to your tenancy in the configmap target section and update the **NAMESPACE** variable. Specify the oci pem key that you have created during ocicli configuration **OCIPEM** + +```makefile +[...] 
+ONAMESPACE=oracle-database-operator-system +NAMESPACE=[MY_NAMESPACE] +OCIPEN=[PATH_TO_OCI_API_KEY_PEM] +[...] +configmap: + $(KUBECTL) create configmap oci-cred \ + --from-literal=tenancy=[MY_TENANCY_ID] + --from-literal=user=[MY_USER_ID] \ + --from-literal=fingerprint=[MY_FINGER_PRINT] \ + --from-literal=region=[MY_REGION] -n $(NAMESPACE) + +[...] +``` +Specify the admin password and the tde password in adminpass and tdepass + +```makefile +adminpass: + echo "[SPECIFY_PASSWORD_HERE]" > ./admin-password + $(KUBECTL) create secret generic admin-password --from-file=./admin-password -n $(NAMESPACE) + $(RM) ./admin-password + +tdepass: + echo "[SPECIFY_PASSWORD_HERE]" > ./tde-password + $(KUBECTL) create secret generic tde-password --from-file=./tde-password -n $(NAMESPACE) + $(RM) ./tde-password +``` + +Execute the following targets step1 step2 step3 step4 step5 to setup secrets and certificates. + +```bash +make step1 +make step2 +make step3 +make step4 +make step5 +``` + +Create the file **dbcs_service_with_minimal_parameters.yaml** + +```yaml +apiVersion: database.oracle.com/v1alpha1 +kind: DbcsSystem +metadata: + name: dbcssystem-create + namespace: [MY_NAMESPACE] +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:EU-MILAN-1-AD-1" + compartmentId: "[MY_COMPARTMENT_ID]" + dbAdminPaswordSecret: "admin-password" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "testdb" + displayName: "dbsystem_example" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + hostName: "host_example_1205" + shape: "VM.Standard2.1" + domain: "example.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "[MY_SUBNET_ID]" + +``` + +Execute the target make file create **make create** or apply directly the above yaml file **kubectl apply -f dbcs_service_with_minimal_parameters.yaml** to create DBCS . 
Verify the DBCS creation by executing **kubectl get DbcsSystem -n [MY_NAMESPACE]** + +``` +kubectl get DbcsSystem -n [MY_NAMESPACE] +NAME AGE +dbcssystem-create 52m +``` +Use the describe command to verify the status and the attributes of the dbcs system created + +```bash +kubectl describe DbcsSystem dbcssystem-create -n [...] +``` +```text +Name: dbcssystem-create +Namespace: pdbnamespace +Labels: +Annotations: kubectl.kubernetes.io/last-applied-configuration: + {"apiVersion":"database.oracle.com/v1alpha1","kind":"DbcsSystem","metadata":{"annotations":{},"name":"dbcssystem-create","namespace":"pdbn...}} + +API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-03-15T14:53:02Z + + Db System: + Availability Domain: OLou:EU-MILAN-1-AD-1 + Compartment Id: [MY_COMPARTMENT_ID] + Db Admin Pasword Secret: admin-password + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: testdb + Db Version: 19c + Db Workload: OLTP + Display Name: "dbsystem_example" + Domain: example.com + Host Name: host_example_1205 + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard2.1 + Ssh Public Keys: + oci-publickey + Subnet Id: [MY_SUBNET_ID] + Oci Config Map: oci-cred + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:EU-MILAN-1-AD-1 + Cpu Core Count: 1 +``` +## makefile + +```Makefile +ONAMESPACE=oracle-database-operator-system +NAMESPACE=[MY_NAMESPACE] +OCIPEN=[PATH_TO_OCI_API_KEY_PEM] +KUBECTL=/usr/bin/kubectl +CERTMANAGER=https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml +RM=/usr/bin/rm + +certmanager: + $(KUBECTL) apply -f $(CERTMANAGER) + +prereq: step1 step2 step3 step4 step5 + +step1: configmap +step2: ociprivkey +step3: adminpass +step4: tdepass +step5: ocipubkey + + +configmap: + $(KUBECTL) create configmap oci-cred \ + --from-literal=tenancy=[MY_TENANCY_ID] + --from-literal=user=[MY_USER_ID] \ + --from-literal=fingerprint=[MY_FINGER_PRINT] \ + --from-literal=region=[MY_REGION] 
-n $(NAMESPACE) + +ociprivkey: + $(KUBECTL) create secret generic oci-privatekey --from-file=privatekey=[PATH_TO_OCI_API_KEY_PEM] -n $(NAMESPACE) + +adminpass: + echo "WElcome_12##" > ./admin-password + $(KUBECTL) create secret generic admin-password --from-file=./admin-password -n $(NAMESPACE) + $(RM) ./admin-password + +tdepass: + echo "WElcome_12##" > ./tde-password + $(KUBECTL) create secret generic tde-password --from-file=./tde-password -n $(NAMESPACE) + $(RM) ./tde-password + +ocipubkey: + #ssh-keygen -N "" -C "DBCS_System"-`date +%Y%m` -P "" + $(KUBECTL) create secret generic oci-publickey --from-file=publickey=/home/oracle/.ssh/id_rsa.pub -n $(NAMESPACE) + +clean: delprivkey delpubkey deladminpass delconfigmap deltdepass + +delconfigmap: + $(KUBECTL) delete configmap oci-cred -n $(NAMESPACE) +delprivkey: + $(KUBECTL) delete secret oci-privatekey -n $(NAMESPACE) +delpubkey: + $(KUBECTL) delete secret oci-publickey -n $(NAMESPACE) +deltdepass: + $(KUBECTL) delete secret tde-password -n $(NAMESPACE) +deladminpass: + $(KUBECTL) delete secret admin-password -n $(NAMESPACE) +checkmap: + $(KUBECTL) get configmaps oci-cred -o yaml -n $(NAMESPACE) |grep -A 5 -B 2 "^data:" +checkdbcs: + $(KUBECTL) describe dbcssystems.database.oracle.com dbcssystem-create -n $(NAMESPACE) +getall: + $(KUBECTL) get all -n $(NAMESPACE) +getmaps: + $(KUBECTL) get configmaps oci-cred -n $(NAMESPACE) -o yaml +descdbcss: + $(KUBECTL) describe dbcssystems.database.oracle.com dbcssystem-create -n $(NAMESPACE) +getdbcs: + $(KUBECTL) get DbcsSystem -n $(NAMESPACE) +create: + $(KUBECTL) apply -f dbcs_service_with_minimal_parameters.yaml -n $(NAMESPACE) +xlog1: + $(KUBECTL) logs -f pod/`$(KUBECTL) get pods -n $(ONAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(ONAMESPACE) +xlog2: + $(KUBECTL) logs -f pod/`$(KUBECTL) get pods -n $(ONAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1` -n $(ONAMESPACE) +xlog3: + $(KUBECTL) logs -f 
pod/`$(KUBECTL) get pods -n $(ONAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(ONAMESPACE) +``` \ No newline at end of file diff --git a/doc/installation/OPERATOR_INSTALLATION_README.md b/docs/installation/OPERATOR_INSTALLATION_README.md similarity index 100% rename from doc/installation/OPERATOR_INSTALLATION_README.md rename to docs/installation/OPERATOR_INSTALLATION_README.md diff --git a/docs/multitenant/NamespaceSeg.md b/docs/multitenant/NamespaceSeg.md new file mode 100644 index 00000000..6738fe56 --- /dev/null +++ b/docs/multitenant/NamespaceSeg.md @@ -0,0 +1,14 @@ + + +# Namespace segregation + +With the namespace segregation pdb controller and cdb controller run in different namespaces. The new functionality introduces a new parameter (the cdb namespace) in pdb crd definition. In case you don't need the namespace segregation you have to sepcify the namespace name that you are using for yours crd and pods anyway. Refer to usercase01 and usecase02 to see single namespace configuration. Refer to usecase03 to see examples of namespace segregation. + +# Secrets + +In order to use multiple namespace we need to create approriate secrets in each namespace. Tls certificate secrets must be created in all namespaces (db-ca db-tls). + +![general_schema](./images/K8S_NAMESPACE_SEG.png) + + + diff --git a/docs/multitenant/README.md b/docs/multitenant/README.md new file mode 100644 index 00000000..0d3057fc --- /dev/null +++ b/docs/multitenant/README.md @@ -0,0 +1,11 @@ +# Multitenant Controllers + +Starting from OraOperator version 1.2.0, there are two classes of multitenant controllers: one based on [ORDS](https://www.oracle.com/uk/database/technologies/appdev/rest.html) and another based on a dedicated REST server for the operator, called LREST. In both cases, the features remains unchanged (a part from CRD name changes). 
A pod running a REST server (either LREST or ORDS) acts as the proxy server connected to the container database (CDB) for all incoming kubectl requests. We plan to discontinue the ORDS-based controller in the next release; there will be no regression (apart from CRD name changes).
[Create PDB](#create-pdb) + - [pdb config map ](#pdb-config-map) + - [Open PDB](#open-pdb) + - [Close PDB](#close-pdb) + - [Clone PDB](#clone-pdb) + - [Unplug PDB](#unplug-pdb) + - [Plug PDB](#plug-pdb) + - [Delete PDB](#delete-pdb) + - [Map PDB](#map-pdb) + + + + + +**Lrpdb** and **lrest** are two controllers for PDB lifecycle management (**PDBLCM**). They rely on a dedicated REST server (Lite Rest Server) Container image to run. The `lrest` controller is available on the Oracle Container Registry (OCR). The container database can be anywhere (on-premises or in the Cloud). + +![generaleschema](./images/Generalschema2.jpg) + +## STEP BY STEP CONFIGURATION +Complete each of these steps in the order given. + +### Multiple namespace setup + +Before proceeding with controllers setup, ensure that the Oracle Database Operator (operator) is configured to work with multiple namespaces, as specified in the [README](../../../README.md). +In this document, each controller is running in a dedicated namespace: lrest controller is running in **cdbnamespace** , lrpdb controller is running in **pdbnamespace**. The [usecase directory](./usecase/README.md) contains all the files reported in this document. 
+ +Configure the **WACTH_NAMESPACE** list of the operator `yaml` file + +```bash +sed -i 's/value: ""/value: "oracle-database-operator-system,pdbnamespace,cdbnamespace"/g' oracle-database-operator.yaml +``` + +### Create the operator +Run the following command: + +```bash +kubectl apply -f oracle-database-operator.yaml +``` +Check the controller: +```bash +kubectl get pods -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +oracle-database-operator-controller-manager-796c9b87df-6xn7c 1/1 Running 0 22m +oracle-database-operator-controller-manager-796c9b87df-sckf2 1/1 Running 0 22m +oracle-database-operator-controller-manager-796c9b87df-t4qns 1/1 Running 0 22m +``` +### Container database setup + +On the container database, use the following commands to configure the account for PDB administration: + +```sql +alter session set "_oracle_script"=true; +create user identified by ; +grant create session to container=all; +grant sysdba to container=all; +``` + + +### Apply rolebinding + + +Apply the following files : [`pdbnamespace_binding.yaml`](./usecase/pdbnamespace_binding.yaml) [`cdbnamespace_binding.yaml`](./usecase/cdbnamespace_binding.yaml) +```bash +kubectl apply -f pdbnamespace_binding.yaml +kubectl apply -f cdbnamespace_binding.yaml +``` + +### Certificate and credentials +You must create the public key, private key, certificates and Kubernetes Secrets for the security configuration. + +#### Private key 🔑 +> Note: Only private key **PCKS8** format is supported by LREST controllers. Before you start configuration, ensure that you can use it. If you are using [`openssl3`](https://docs.openssl.org/master/) then `pcks8` is generated by default. If it is not already generated, then use the following command to create a `pcks8` private key + +```bash +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 -out private.key +``` +#### Public Key 🔑 +Create the public key. 
+ +```bash +/usr/bin/openssl rsa -in private.key -outform PEM -pubout -out public.pem +``` +#### Certificates +Create certificates. +```bash +openssl req -new -x509 -days 365 -key private.key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=oracle Root CA" -out ca.crt +``` +```bash +openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=cdb-dev-lrest.cdbnamespace" -out server.csr +``` +```bash +/usr/bin/echo "subjectAltName=DNS:cdb-dev-lrest.cdbnamespace,DNS:www.example.com" > extfile.txt +``` +```bash +/usr/bin/openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey private.key -CAcreateserial -out tls.crt +``` + +### Create secrets for certificate and keys +Create the Kubernetes Secrets. + +```bash +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n oracle-database-operator-system +kubectl create secret generic db-ca --from-file="ca.crt" -n oracle-database-operator-system +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n cdbnamespace +kubectl create secret generic db-ca --from-file="ca.crt" -n cdbnamespace +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n pdbnamespace +kubectl create secret generic db-ca --from-file="ca.crt" -n pdbnamespace +``` + +```bash +kubectl create secret tls prvkey --key="private.key" --cert=ca.crt -n cdbnamespace +kubectl create secret generic pubkey --from-file=publicKey=public.pem -n cdbnamespace +kubectl create secret generic prvkey --from-file=privateKey="private.key" -n pdbnamespace +``` + +### Create secrets with encrypted password + +In this example, we create the Secrets for each credential (username and password) + +| secret usr | secrets pwd | credential description | +| -----------|-------------|-----------------------------------------------------------| +| **dbuser** |**dbpass** | the administrative user created on the container database | +| **wbuser** |**wbpass** | the user for https authentication | +| 
**pdbusr** |**pdbpwd** | the administrative user of the pdbs | + + +```bash +echo "[ADMINUSERNAME]" > dbuser.txt +echo "[ADMINUSERNAME PASSWORD]" > dbpass.txt +echo "[WEBUSER]" > wbuser.txt +echo "[WEBUSER PASSWORD]" > wbpass.txt +echo "[PDBUSERNAME]" > pdbusr.txt +echo "[PDBUSERNAME PASSWORD]" > pdbpwd.txt + +## Encrypt the credentials +openssl rsautl -encrypt -pubin -inkey public.pem -in dbuser.txt |base64 > e_dbuser.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in dbpass.txt |base64 > e_dbpass.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in wbuser.txt |base64 > e_wbuser.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in wbpass.txt |base64 > e_wbpass.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in pdbusr.txt |base64 > e_pdbusr.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in pdbpwd.txt |base64 > e_pdbpwd.txt + +kubectl create secret generic dbuser --from-file=e_dbuser.txt -n cdbnamespace +kubectl create secret generic dbpass --from-file=e_dbpass.txt -n cdbnamespace +kubectl create secret generic wbuser --from-file=e_wbuser.txt -n cdbnamespace +kubectl create secret generic wbpass --from-file=e_wbpass.txt -n cdbnamespace +kubectl create secret generic wbuser --from-file=e_wbuser.txt -n pdbnamespace +kubectl create secret generic wbpass --from-file=e_wbpass.txt -n pdbnamespace +kubectl create secret generic pdbusr --from-file=e_pdbusr.txt -n pdbnamespace +kubectl create secret generic pdbpwd --from-file=e_pdbpwd.txt -n pdbnamespace + +rm dbuser.txt dbpass.txt wbuser.txt wbpass.txt pdbusr.txt pdbpwd.txt \ + e_dbuser.txt e_dbpass.txt e_wbuser.txt e_wbpass.txt e_pdbusr.txt e_pdbpwd.txt +``` + +### Create lrest pod + +To create the REST pod and monitor its processing, use the `yaml` file [`create_lrest_pod.yaml`](./usecase/create_lrest_pod.yaml) + +Ensure that you update the **lrestImage** with the latest version available on the [Oracle Container Registry 
(OCR)](https://container-registry.oracle.com/ords/f?p=113:4:104288359787984:::4:P4_REPOSITORY,AI_REPOSITORY,AI_REPOSITORY_NAME,P4_REPOSITORY_NAME,P4_EULA_ID,P4_BUSINESS_AREA_ID:1283,1283,This%20image%20is%20part%20of%20and%20for%20use%20with%20the%20Oracle%20Database%20Operator%20for%20Kubernetes,This%20image%20is%20part%20of%20and%20for%20use%20with%20the%20Oracle%20Database%20Operator%20for%20Kubernetes,1,0&cs=3076h-hg1qX3eJANBcUHBNBCmYWjMvxLkZyTAhDn2e8VR8Gxb_a-I8jZLhf9j6gmnimHwlP_a0OQjX6vjBfSAqQ) + +```bash +--> for amd64 +lrestImage: container-registry.oracle.com/database/operator:lrest-241210-amd64 + +--> for arm64 +lrestImage: container-registry.oracle.com/database/operator:lrest-241210-arm64 +``` + +```bash +kubectl apply -f create_lrest_pod.yaml +``` + +monitor the file processing: + +```bash +kubectl get pods -n cdbnamespace --watch +NAME READY STATUS RESTARTS AGE +cdb-dev-lrest-rs-9gvx2 0/1 Pending 0 0s +cdb-dev-lrest-rs-9gvx2 0/1 Pending 0 0s +cdb-dev-lrest-rs-9gvx2 0/1 ContainerCreating 0 0s +cdb-dev-lrest-rs-9gvx2 1/1 Running 0 2s + +kubectl get lrest -n cdbnamespace +NAME CDB NAME DB SERVER DB PORT TNS STRING REPLICAS STATUS MESSAGE +cdb-dev DB12 (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) 1 Ready +``` + +Check the Pod logs: + +```bash +/usr/local/go/bin/kubectl logs -f `/usr/local/go/bin/kubectl get pods -n cdbnamespace|grep lrest|cut -d ' ' -f 1` -n cdbnamespace +``` + +Output example: + +```text +... +... 
+2024/09/05 12:44:09 wallet file /opt/oracle/lrest/walletfile exists completed +2024/09/05 12:44:09 call: C.ReadWallet +LENCHECK: 7 11 7 8 +2024/09/05 12:44:09 ===== DUMP INFO ==== +00000000 28 44 45 53 43 52 49 50 54 49 4f 4e 3d 28 43 4f |(DESCRIPTION=(CO| +00000010 4e 4e 45 43 54 5f 54 49 4d 45 4f 55 54 3d 39 30 |NNECT_TIMEOUT=90| +00000020 29 28 52 45 54 52 59 5f 43 4f 55 4e 54 3d 33 30 |)(RETRY_COUNT=30| +00000030 29 28 52 45 54 52 59 5f 44 45 4c 41 59 3d 31 30 |)(RETRY_DELAY=10| +00000040 29 28 54 52 41 4e 53 50 4f 52 54 5f 43 4f 4e 4e |)(TRANSPORT_CONN| +00000050 45 43 54 5f 54 49 4d 45 4f 55 54 3d 37 30 29 28 |ECT_TIMEOUT=70)(| +00000060 4c 4f 41 44 5f 42 41 4c 4c 41 4e 43 45 3d 4f 4e |LOAD_BALLANCE=ON| +00000070 29 28 41 44 44 52 45 53 53 3d 28 50 52 4f 54 4f |)(ADDRESS=(PROTO| +00000080 43 4f 4c 3d 54 43 50 29 28 48 4f 53 54 3d 73 63 |COL=TCP)(HOST=sc| +00000090 61 6e 31 32 2e 74 65 73 74 72 61 63 2e 63 6f 6d |an12.testrac.com| +000000a0 29 28 50 4f 52 54 3d 31 35 32 31 29 28 49 50 3d |)(PORT=1521)(IP=| +000000b0 56 34 5f 4f 4e 4c 59 29 29 28 4c 4f 41 44 5f 42 |V4_ONLY))(LOAD_B| +000000c0 41 4c 4c 41 4e 43 45 3d 4f 4e 29 28 41 44 44 52 |ALLANCE=ON)(ADDR| +000000d0 45 53 53 3d 28 50 52 4f 54 4f 43 4f 4c 3d 54 43 |ESS=(PROTOCOL=TC| +000000e0 50 29 28 48 4f 53 54 3d 73 63 61 6e 33 34 2e 74 |P)(HOST=scan34.t| +000000f0 65 73 74 72 61 63 2e 63 6f 6d 29 28 50 4f 52 54 |estrac.com)(PORT| +00000100 3d 31 35 32 31 29 28 49 50 3d 56 34 5f 4f 4e 4c |=1521)(IP=V4_ONL| +00000110 59 29 29 28 43 4f 4e 4e 45 43 54 5f 44 41 54 41 |Y))(CONNECT_DATA| +00000120 3d 28 53 45 52 56 45 52 3d 44 45 44 49 43 41 54 |=(SERVER=DEDICAT| +00000130 45 44 29 28 53 45 52 56 49 43 45 5f 4e 41 4d 45 |ED)(SERVICE_NAME| +00000140 3d 54 45 53 54 4f 52 44 53 29 29 29 |=TESTORDS)))| +00000000 2f 6f 70 74 2f 6f 72 61 63 6c 65 2f 6c 72 65 73 |/opt/oracle/lres| +00000010 74 2f 77 61 6c 6c 65 74 66 69 6c 65 |t/walletfile| +2024/09/05 12:44:09 Get credential from wallet +7 +8 +2024/09/05 12:44:09 
dbuser: restdba webuser :welcome +2024/09/05 12:44:09 Connections Handle +2024/09/05 12:44:09 Working Session Aarry dbhanlde=0x1944120 +2024/09/05 12:44:09 Monitor Session Array dbhanlde=0x1a4af70 +2024/09/05 12:44:09 Open cursors +Parsing sqltext=select inst_id,con_id,open_mode,nvl(restricted,'NONE'),total_size from gv$pdbs where inst_id = SYS_CONTEXT('USERENV','INSTANCE') and name =upper(:b1) +Parsing sqltext=select count(*) from pdb_plug_in_violations where name =:b1 +2024/09/05 12:44:11 Protocol=https +2024/09/05 12:44:11 starting HTTPS/SSL server +2024/09/05 12:44:11 ==== TLS CONFIGURATION === +2024/09/05 12:44:11 srv=0xc000106000 +2024/09/05 12:44:11 cfg=0xc0000a2058 +2024/09/05 12:44:11 mux=0xc0000a2050 +2024/09/05 12:44:11 tls.minversion=771 +2024/09/05 12:44:11 CipherSuites=[49200 49172 157 53] +2024/09/05 12:44:11 cer=/opt/oracle/lrest/certificates/tls.crt +2024/09/05 12:44:11 key=/opt/oracle/lrest/certificates/tls.key +2024/09/05 12:44:11 ========================== +2024/09/05 12:44:11 HTTPS: Listening port=8888 +2024/09/05 12:44:23 call BasicAuth Succeded +2024/09/05 12:44:23 HTTP: [1:0] Invalid credential <-- This message can be ignored + +``` + +**lrest Pod creation** - parameters list +| Name | Dcription | +--------------------------|-------------------------------------------------------------------------------| +|cdbName | Name of the container database (db) | +|lrestImage (DO NOT EDIT) | **container-registry.oracle.com/database/lrest-dboper:latest** use the latest label availble on OCR | +|dbTnsurl | The string of the tns alias to connect to cdb. 
Attention: remove all white space from string | +|deletePdbCascade | Delete all of the PDBs associated to a CDB resource when the CDB resource is dropped using [imperative approach](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/imperative-command/) | +|cdbAdminUser | Secret: the administrative (admin) user | +|fileNameConversions | Use file name conversion if you are not using ASM | +|cdbAdminPwd | Secret: the admin user password | +|webServerUser | Secret: the HTTPS user | +|webServerPwd | Secret: the HTTPS user password | +|cdbTlsCrt | Secret: the `tls.crt ` | +|cdbPubKey | Secret: the public key | +|cdbPrvKey | Secret: the private key | + + + + +### Create PDB + +To create a pluggable database, apply the yaml file [`create_pdb1_resource.yaml`](./usecase/create_pdb1_resource.yaml) + +```bash +kubectl apply -f create_pdb1_resource.yaml +``` +Check the status of the resource and the PDB existence on the container db: + +```bash +kubectl get lrpdb -n pdbnamespace +NAME CONNECT_STRING CDB NAME LRPDB NAME LRPDB STATE LRPDB SIZE STATUS MESSAGE LAST SQLCODE +lrpdb1 (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdbdev))) DB12 pdbdev MOUNTED 2G Ready Success +``` + +```bash +SQL> show pdbs + + CON_ID CON_NAME OPEN MODE RESTRICTED +---------- ------------------------------ ---------- ---------- + 2 PDB$SEED READ ONLY NO + 3 PDBDEV MOUNTED +SQL> +``` +``Note that after creation, the PDB is not open. You must explicitly open it using a dedicated `yaml` file. 
+ +**pdb creation** - parameters list + +| Name | Dcription | +|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database | +|pdbName | Name of the PDB that you want to create | +|assertiveLrpdbDeletion | Boolean: Turn on the imperative approach on PDB resource deletion | +|adminpdbUser | Secret: PDB admin user | +|adminpdbPass | Secret: password of PDB admin user | +|lrpdbTlsKey | Secret: `tls.key ` | +|lrpdbTlsCrt | Secret: `tls.crt` | +|lrpdbTlsCat | Secret: `ca.crt` | +|webServerUser | Secret: the HTTPS user | +|webServerPwd | Secret: the HTTPS user password | +|cdbPrvKey | Secret: private key | +|cdbPubKey | Secret: public key | +|pdbconfigmap | kubernetes config map that contains the PDB initialization (init) parameters | + +> NOTE: **assertiveLrpdbDeletion** must be specified for the following PDB actions **CLONE** **CREATE** **PLUG** **MAP**. + +🔥 **assertiveLrpdbDeletion** drops pluggable database using **INCLUDE DATAFILES** option + +All of the parameters **adminpdbUser** **adminpdbPass** **lrpdbTlsKey** **lrpdbTlsCrt** **lrpdbTlsCat** **webServerUser** **webServerPwd** **cdbPrvKey** **cdbPubKey** must be specified in all PDB lifecycle management `yaml` files. To simplify presentation of requirements, we will not include them in the subsequent tables. + + +#### pdb config map + +By using **pdbconfigmap** it is possible to specify a kubernetes `configmap` with init PDB parameters. The config map payload has the following format: + + +``` +;; +;; +;; +.... +.... 
+;; +``` + +Example of `configmap` creation: + +```bash +cat < parameters.txt +session_cached_cursors;100;spfile +open_cursors;100;spfile +db_file_multiblock_read_count;16;spfile +EOF + +kubectl create configmap config-map-pdb -n pdbnamespace --from-file=./parameters.txt + +kubectl describe configmap config-map-pdb -n pdbnamespace +Name: config-map-pdb +Namespace: pdbnamespace +Labels: +Annotations: + +Data +==== +parameters.txt: +---- +session_cached_cursors;100;spfile +open_cursors;100;spfile +db_file_multiblock_read_count;16;spfile +test_invalid_parameter;16;spfile +``` + +- If specified, the `configmap` is applied during PDB **cloning**, **opening** and **plugging** +- The `configmap` is not monitored by the reconciliation loop; this feature will be available in future releases. This means that if someone decides to manually alter an init parameter, then the operator does not take any actions to syncronize PDB configuration with the `configmap`. +- **Alter system parameter feature** will be available in future releases. +- An application error with the `configmap` (for whatever reason) does not stop processes from completing. A warning with the associated SQL code is reported in the log file. 
+ + + +### Open PDB + +To open the PDB, use the file [`open_pdb1_resource.yaml`](./usecase/open_pdb1_resource.yaml): + +```bash +kubectl apply -f open_pdb1_resource.yaml +``` + + **pdb opening** - parameters list + +| Name | Description/Value | +|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database (CDB) | +|pdbName | Name of the pluggable database (PDB) that you are creating | +|action | Use **Modify** to open the PDB | +|pdbState | Use **OPEN** to open the PDB | +|modifyOption | Use **READ WRITE** to open the PDB | + +### Close PDB + +To close the PDB, use the file [`close_pdb1_resource.yaml`](./usecase/close_pdb1_resource.yaml): + +```bash +kubectl apply -f close_pdb1_resource.yaml +``` +**pdb closing** - parameters list +| Name | Description/Value | +|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database (CDB) | +|pdbName | Name of the pluggable database (PDB) that you want to create | +|action | Use **Modify** to close the PDB | +|pdbState | Use **CLOSE** to close the PDB | +|modifyOption | Use **IMMEDIATE** to close the PDB | + +### Clone PDB ### + +To clone the PDB, use the file [`clone_pdb1_resource.yaml`](./usecase/clone_pdb1_resource.yaml): + +```bash +kubeclt apply -f clone_pdb1_resource.yaml +``` +**pdb cloning** - parameters list +| Name | Description/Value | +|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database (CDB) | +|pdbName | The name of the new pluggable database (PDB) | +|srcPdbName | The 
name of the source PDB | +|fileNameConversions | File name convert pattern **("path1","path2")** or **NONE** | +|totalSize | Set **unlimited** for cloning | +|tempSize | Set **unlimited** for cloning | +|pdbconfigmap | kubernetes `configmap` which contains the PDB init parameters | +|action | Use **clone** to clone the PDB | + +### Unplug PDB + +To unplug the PDB, use the file [`unplug_pdb1_resource.yaml`](./usecase/unplug_pdb1_resource.yaml): + +**pdb unplugging** +| Name | Description/Value | +|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database (CDB) | +|pdbName | Name of the pluggable database (PDB)| +### Plug PDB + +To plug in the PDB, use the file [`plug_pdb1_resource.yaml`](./usecase/plug_pdb1_resource.yaml). In this example, we plug in the PDB that was unpluged in the previous step: + +**pdb plugging** +| Name | Description/Value | +|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database (CDB)| | +|pdbName | Name of the pluggable database (PDB) | +|**xmlFileName** | Path of the XML file | +|action | **plug** | +|fileNameConversions | File name convert pattern **("path1","path2")** or **NONE** | +|sourceFileNameConversion | See parameter [SOURCE_FILE_NAME_CONVERT](https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CREATE-PLUGGABLE-DATABASE.html#GUID-F2DBA8DD-EEA8-4BB7-A07F-78DC04DB1FFC__CCHEJFID) documentation | +|pdbconfigmap | Kubernetes `configmap` that contains the PDB init parameters | + +### Delete PDB + +To delete the PDB, use the file [`delete_pdb1_resource.yaml`](./usecase/delete_pdb1_resource.yaml) + +**pdb deletion** + +| Name | Dcription/Value | 
+|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database (CDB) | +|action | **Delete** | +|dropAction | **INCLUDING** - Including datafiles or **NONE** | + + +### Map PDB + +If you need to create a CRD for an existing PDB, then you can use the map option by applying the file [`map_pdb1_resource.yaml`](./usecase/map_pdb1_resource.yaml) +Map functionality can be used in a situation where you have a pdb which is not registered in the operator as a CRD. It's a temporary solution while waiting the autodiscovery to be available. + + + diff --git a/docs/multitenant/lrest-based/images/Generalschema2.jpg b/docs/multitenant/lrest-based/images/Generalschema2.jpg new file mode 100644 index 00000000..7e7c20c0 Binary files /dev/null and b/docs/multitenant/lrest-based/images/Generalschema2.jpg differ diff --git a/docs/multitenant/lrest-based/images/UsecaseSchema.jpg b/docs/multitenant/lrest-based/images/UsecaseSchema.jpg new file mode 100644 index 00000000..14eb0d86 Binary files /dev/null and b/docs/multitenant/lrest-based/images/UsecaseSchema.jpg differ diff --git a/docs/multitenant/lrest-based/usecase/README.md b/docs/multitenant/lrest-based/usecase/README.md new file mode 100644 index 00000000..98897d7b --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/README.md @@ -0,0 +1,139 @@ + + + +# Use case directory + +The use case directory contains the `yaml` files to test the multitenant controller functionalities: create `lrest` pod, and create PDB operations *create / open / close / unplug / plug / delete / clone /map / parameter session* + +## Makefile helper + +Customizing `yaml` files (tns alias / credential / namespaces name, and so on) is a long procedure that is prone to human error. 
A simple [`makefile`](../usecase/makefile) is available to quickly and safely configure `yaml` files with your system environment information. Just edit the [parameter file](../usecase/parameters.txt) before proceding. + +```text +TNSALIAS...............:[Tnsalias do not use quotes and avoid space in the string --> (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELA....] +DBUSER.................:[CDB admin user] +DBPASS.................:[CDB admin user password] +WBUSER.................:[HTTPS user] +WBPASS.................:[HTTPS user password] +PDBUSR.................:[PDB admin user] +PDBPWD.................:[PDB admin user password] +PDBNAMESPACE...........:[pdb namespace] +LRSNAMESPACE...........:[cdb namespace] +COMPANY................:[your company name] +APIVERSION.............:v4 --> do not edit +``` + +⚠ **WARNING: The makefile is only intended to speed up the usecase directory configuration. Use of this file for production purposes is not supported. The editing and configuration of yaml files for production system is left up to the end user** + +### Prerequisistes: + +- Ensure that **kubectl** is properly configured. +- Ensure that all requirements listed in the [operator installation page](../../../../docs/installation/OPERATOR_INSTALLATION_README.md) are implemented. (role binding,webcert,etc) +- Ensure that the administrative user (admin) on the container database is configured as documented. + +```bash +make operator +``` +This command creates the `operator-database-operator.yaml` file in the local directory, and set up the `watchnamespace` list. Note that the `yaml` file is not applied. + +```bash +make secrets +``` +This command creates all of the Secrets with the encrypted credentials. + +```bash +make genyaml +``` +*make genyaml* generates the required `yaml` files to work with multitenant controllers. 
+ + +![image](../images/UsecaseSchema.jpg) + +## Diag commands and troubleshooting + +### Connect to rest server pod + +```bash +/usr/bin/kubectl exec -n -it -- /bin/bash +``` + + +```bash +## example ## + +kubectl get pods -n cdbnamespace +NAME READY STATUS RESTARTS AGE +cdb-dev-lrest-rs-fnw99 1/1 Running 1 (17h ago) 18h + +kubectl exec cdb-dev-lrest-rs-fnw99 -n cdbnamespace -it -- /bin/bash +[oracle@cdb-dev-lrest-rs-fnw99 ~]$ +``` + +### Monitor control plane + +```bash +kubectl logs -f -l control-plane=controller-manager -n oracle-database-operator-system +``` +```bash +## output example: ## +2024-10-28T23:54:25Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb2 +2024-10-28T23:54:25Z INFO lrpdb-webhook validateCommon {"name": "lrpdb2"} +2024-10-28T23:54:25Z INFO lrpdb-webhook Valdiating LRPDB Resource Action : MODIFY +2024-10-29T10:07:34Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb2 +2024-10-29T10:07:34Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb1 +2024-10-29T16:49:15Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb1 +2024-10-29T16:49:15Z INFO lrpdb-webhook validateCommon {"name": "lrpdb1"} +2024-10-29T16:49:15Z INFO lrpdb-webhook Valdiating LRPDB Resource Action : CREATE +2024-10-29T10:07:20Z INFO controller-runtime.certwatcher Updated current TLS certificate +2024-10-29T10:07:20Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} +2024-10-29T10:07:20Z INFO controller-runtime.certwatcher Starting certificate watcher +I1029 10:07:20.189724 1 leaderelection.go:250] attempting to acquire leader lease oracle-database-operator-system/a9d608ea.oracle.com... 
+2024-10-29T16:49:15Z INFO lrpdb-webhook Setting default values in LRPDB spec for : lrpdb1 + +``` + +### Error decrypting credential + +The following is an example of a resource creation failure due to decription error: + +```text +2024-10-30T10:09:08Z INFO controllers.LRPDB getEncriptedSecret :pdbusr {"getEncriptedSecret": {"name":"lrpdb1","namespace":"pdbnamespace"}} +2024-10-30T10:09:08Z ERROR controllers.LRPDB Failed to parse private key - x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format) {"DecryptWithPrivKey": {"name":"lrpdb1","namespace":"pdbnamespace"}, "error": "x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)"} +``` + + +**Solution**: Ensure you use **PCKS8** format during private key generation. If you are not using `openssl3`, then run this command: + +```bash +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > mykey +``` + +### Crd details + +Use the **describe** option to obtain `crd` information + +```bash +kubectl describe lrpdb lrpdb1 -n pdbnamespace +[...] 
+ Secret: + Key: e_wbuser.txt + Secret Name: wbuser +Status: + Action: CREATE + Bitstat: 25 + Bitstatstr: |MPAPPL|MPWARN|MPINIT| + Conn String: (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdbdev))) + Msg: Success + Open Mode: MOUNTED + Phase: Ready + Status: true + Total Size: 2G +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Created 108s LRPDB LRPDB 'pdbdev' created successfully + Normal Created 108s LRPDB PDB 'pdbdev' assertive pdb deletion turned on + Warning LRESTINFO 95s LRPDB pdb=pdbdev:test_invalid_parameter:16:spfile:2065 + Warning Done 15s (x12 over 2m25s) LRPDB cdb-dev + +``` diff --git a/docs/multitenant/lrest-based/usecase/altersystem_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/altersystem_pdb1_resource.yaml new file mode 100644 index 00000000..0467a948 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/altersystem_pdb1_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Alter" + alterSystemParameter : "cpu_count" + alterSystemValue : "3" + parameterScope : "memory" + + + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + 
key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/cdbnamespace_binding.yaml b/docs/multitenant/lrest-based/usecase/cdbnamespace_binding.yaml new file mode 100644 index 00000000..5fd355f4 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/cdbnamespace_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: cdbnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/lrest-based/usecase/clone_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/clone_pdb1_resource.yaml new file mode 100644 index 00000000..2c4afe13 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/clone_pdb1_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" 
diff --git a/docs/multitenant/lrest-based/usecase/clone_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/clone_pdb2_resource.yaml new file mode 100644 index 00000000..16255a87 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/clone_pdb2_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb4 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/close_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/close_pdb1_resource.yaml new file mode 100644 index 00000000..87f7383d --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/close_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: 
"db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/close_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/close_pdb2_resource.yaml new file mode 100644 index 00000000..0743bd8c --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/close_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/close_pdb3_resource.yaml b/docs/multitenant/lrest-based/usecase/close_pdb3_resource.yaml new file mode 100644 index 00000000..6c6ca519 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/close_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + 
pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/config-map-pdb.yaml b/docs/multitenant/lrest-based/usecase/config-map-pdb.yaml new file mode 100644 index 00000000..2769b498 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/config-map-pdb.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-map-pdb + namespace: pdbnamespace +data: + rdbmsparameters.txt: | + session_cached_cursors;100;spfile + open_cursors;100;spfile + db_file_multiblock_read_count;16;spfile + test_invalid_parameter;16;spfile diff --git a/docs/multitenant/lrest-based/usecase/config_map_pdb.yaml b/docs/multitenant/lrest-based/usecase/config_map_pdb.yaml new file mode 100644 index 00000000..2769b498 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/config_map_pdb.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-map-pdb + namespace: pdbnamespace +data: + rdbmsparameters.txt: | + session_cached_cursors;100;spfile + open_cursors;100;spfile + db_file_multiblock_read_count;16;spfile + test_invalid_parameter;16;spfile diff --git a/docs/multitenant/lrest-based/usecase/create_lrest_pod.yaml b/docs/multitenant/lrest-based/usecase/create_lrest_pod.yaml new file mode 100644 index 00000000..b80c1c56 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/create_lrest_pod.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v4 +kind: LREST 
+metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + lrestImage: container-registry.oracle.com/database/operator:lrest-241210-amd64 + lrestImagePullPolicy: "Always" + dbTnsurl : "(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + replicas: 1 + deletePdbCascade: true + cdbAdminUser: + secret: + secretName: "dbuser" + key: "e_dbuser.txt" + cdbAdminPwd: + secret: + secretName: "dbpass" + key: "e_dbpass.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbPubKey: + secret: + secretName: "pubkey" + key: "publicKey" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/create_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/create_pdb1_resource.yaml new file mode 100644 index 00000000..fa58d36a --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/create_pdb1_resource.yaml @@ -0,0 +1,52 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + 
secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/create_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/create_pdb2_resource.yaml new file mode 100644 index 00000000..02d5763b --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/create_pdb2_resource.yaml @@ -0,0 +1,52 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/delete_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/delete_pdb1_resource.yaml new file mode 100644 index 00000000..1a3c328a --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/delete_pdb1_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: 
pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/delete_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/delete_pdb2_resource.yaml new file mode 100644 index 00000000..747641d4 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/delete_pdb2_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/makefile b/docs/multitenant/lrest-based/usecase/makefile new file mode 100644 index 00000000..1de320ad --- /dev/null +++ 
b/docs/multitenant/lrest-based/usecase/makefile @@ -0,0 +1,911 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# | | | | ___| |_ __ ___ _ __ +# | |_| |/ _ \ | '_ \ / _ \ '__| +# | _ | __/ | |_) | __/ | +# |_| |_|\___|_| .__/ \___|_| +# |_| +# +# WARNING: Using this makefile helps you to customize yaml +# files. Edit parameters.txt with your enviroment +# informartion and execute the following steps +# +# 1) make operator +# it configures the operator yaml files with the +# watch namelist required by the multitenant controllers +# +# 2) make secrets +# It configure the required secrets necessary to operate +# with pdbs multitenant controllers +# +# 3) make genyaml +# It automatically creates all the yaml files based on the +# information available in the parameters file +# +# LIST OF GENERAED YAML FILE +# +# ----------------------------- ---------------------------------- +# oracle-database-operator.yaml : oracle database operator +# lrestnamespace_binding.yaml : role binding for lrestnamespace +# pdbnamespace_binding.yaml : role binding for pdbnamespace +# create_lrest_secret.yaml : create secrets for rest server pod +# create_lrpdb_secret.yaml : create secrets for pluggable database +# create_lrest_pod.yaml : create rest server pod +# create_pdb1_resource.yaml : create first pluggable database +# create_pdb2_resource.yaml : create second pluggable database +# open_pdb1_resource.yaml : open first pluggable database +# open_pdb2_resource.yaml : open second pluggable database +# close_pdb1_resource.yaml : close first pluggable database +# close_pdb2_resource.yaml : close second pluggable database +# clone_lrpdb_resource.yaml : clone thrid pluggable database +# clone_pdb2_resource.yaml : 
clone 4th pluggable database +# delete_pdb1_resource.yaml : delete first pluggable database +# delete_pdb2_resource.yaml : delete second pluggable database +# delete_pdb3_resource.yaml : delete third pluggable database +# unplug_pdb1_resource.yaml : unplug first pluggable database +# plug_pdb1_resource.yaml : plug first pluggable database +# map_pdb1_resource.yaml : map the first pluggable database +# config_map.yam : pdb parameters array +# altersystem_pdb1_resource.yaml : change cpu_count count parameter for the first pdb +# +DATE := `date "+%y%m%d%H%M%S"` +###################### +# PARAMETER SECTIONS # +###################### + +export PARAMETERS=parameters.txt +export TNSALIAS=$(shell cat $(PARAMETERS) |grep -v ^\#|grep TNSALIAS|cut -d : -f 2) +export DBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep DBUSER|cut -d : -f 2) +export DBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep DBPASS|cut -d : -f 2) +export WBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBUSER|cut -d : -f 2) +export WBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBPASS|cut -d : -f 2) +export PDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBUSR|cut -d : -f 2) +export PDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBPWD|cut -d : -f 2) +export PDBNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBNAMESPACE|cut -d : -f 2) +export LRSNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep LRSNAMESPACE|cut -d : -f 2) +export LRESTIMG=$(shell cat $(PARAMETERS)|grep -v ^\#|grep LRESTIMG|cut -d : -f 2,3) +export COMPANY=$(shell cat $(PARAMETERS)|grep -v ^\#|grep COMPANY|cut -d : -f 2) +export APIVERSION=$(shell cat $(PARAMETERS)|grep -v ^\#|grep APIVERSION|cut -d : -f 2) +export OPRNAMESPACE=oracle-database-operator-system +export ORACLE_OPERATOR_YAML=../../../../oracle-database-operator.yaml +export TEST_EXEC_TIMEOUT=3m + +REST_SERVER=lrest +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +PRVKEY=ca.key +PUBKEY=public.pem +COMPANY=oracle +DBUSERFILE=dbuser.txt +DBPASSFILE=dbpass.txt 
+WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + +################# +### FILE LIST ### +################# + +export LREST_POD=create_lrest_pod.yaml + +export LRPDBCRE1=create_pdb1_resource.yaml +export LRPDBCRE2=create_pdb2_resource.yaml + +export LRPDBCLOSE1=close_pdb1_resource.yaml +export LRPDBCLOSE2=close_pdb2_resource.yaml +export LRPDBCLOSE3=close_pdb3_resource.yaml + +export LRPDBOPEN1=open_pdb1_resource.yaml +export LRPDBOPEN2=open_pdb2_resource.yaml +export LRPDBOPEN3=open_pdb3_resource.yaml + +export LRPDBCLONE1=clone_pdb1_resource.yaml +export LRPDBCLONE2=clone_pdb2_resource.yaml + +export LRPDBDELETE1=delete_pdb1_resource.yaml +export LRPDBDELETE2=delete_pdb2_resource.yaml +export LRPDBDELETE3=delete_pdb3_resource.yaml + +export LRPDBUNPLUG1=unplug_pdb1_resource.yaml +export LRPDBPLUG1=plug_pdb1_resource.yaml + +export LRPDBMAP1=map_pdb1_resource.yaml +export LRPDBMAP2=map_pdb2_resource.yaml +export LRPDBMAP3=map_pdb3_resource.yaml + +export LRPDBMAP1=map_pdb1_resource.yaml +export LRPDBMAP2=map_pdb2_resource.yaml +export LRPDBMAP3=map_pdb3_resource.yaml + +export ALTERSYSTEMYAML=altersystem_pdb1_resource.yaml +export CONFIG_MAP=config_map_pdb.yaml + + + + +##BINARIES +export KUBECTL=/usr/bin/kubectl +OPENSSL=/usr/bin/openssl +ECHO=/usr/bin/echo +RM=/usr/bin/rm +CP=/usr/bin/cp +TAR=/usr/bin/tar +MKDIR=/usr/bin/mkdir +SED=/usr/bin/sed + +check: + @printf "TNSALIAS...............:%.60s....\n" $(TNSALIAS) + @printf "DBUSER.................:%s\n" $(DBUSER) + @printf "DBPASS.................:%s\n" $(DBPASS) + @printf "WBUSER.................:%s\n" $(WBUSER) + @printf "WBPASS.................:%s\n" $(WBPASS) + @printf "PDBUSR.................:%s\n" $(PDBUSR) + @printf "PDBPWD.................:%s\n" $(PDBPWD) + @printf "PDBNAMESPACE...........:%s\n" $(PDBNAMESPACE) + @printf "LRSNAMESPACE...........:%s\n" $(LRSNAMESPACE) + @printf "COMPANY................:%s\n" $(COMPANY) + @printf "APIVERSION.............:%s\n" 
$(APIVERSION) + +define msg +@printf "\033[31;7m%s\033[0m\r" "......................................]" +@printf "\033[31;7m[\xF0\x9F\x91\x89 %s\033[0m\n" $(1) +endef + +tls: + $(call msg,"TLS GENERATION") + #$(OPENSSL) genrsa -out $(PRVKEY) 2048 + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > $(PRVKEY) + $(OPENSSL) req -new -x509 -days 365 -key $(PRVKEY) \ + -subj "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=$(COMPANY) Root CA" -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj \ + "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=cdb-dev-$(REST_SERVER).$(LRSNAMESPACE)" -out server.csr + $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER).$(LRSNAMESPACE)" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey $(PRVKEY) -CAcreateserial -out $(SCRT) + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + +secrets: tls delsecrets + $(call msg,"CREATING NEW TLS/PRVKEY/PUBKEY SECRETS") + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(LRSNAMESPACE) + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(PDBNAMESPACE) + #$(KUBECTL) create secret tls prvkey --key="$(PRVKEY)" --cert=ca.crt -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey="$(PRVKEY)" -n $(PDBNAMESPACE) + $(call msg,"CREATING NEW CREDENTIAL SECRETS") + @$(ECHO) $(DBUSER) > $(DBUSERFILE) + @$(ECHO) $(DBPASS) > $(DBPASSFILE) + @$(ECHO) $(WBUSER) > $(WBUSERFILE) + @$(ECHO) $(WBPASS) > $(WBPASSFILE) + @$(ECHO) $(PDBUSR) > $(PDBUSRFILE) + @$(ECHO) $(PDBPWD) > 
$(PDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(DBUSERFILE) |base64 > e_$(DBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(DBPASSFILE) |base64 > e_$(DBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBUSERFILE) |base64 > e_$(WBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBPASSFILE) |base64 > e_$(WBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBUSRFILE) |base64 > e_$(PDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBPWDFILE) |base64 > e_$(PDBPWDFILE) + $(KUBECTL) create secret generic dbuser --from-file=e_$(DBUSERFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic dbpass --from-file=e_$(DBPASSFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic pdbusr --from-file=e_$(PDBUSRFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic pdbpwd --from-file=e_$(PDBPWDFILE) -n $(PDBNAMESPACE) + $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl \ + $(DBUSERFILE) $(DBPASSFILE) $(WBUSERFILE) $(WBPASSFILE) $(PDBUSRFILE) $(PDBPWDFILE)\ + e_$(DBUSERFILE) e_$(DBPASSFILE) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + $(KUBECTL) get secrets -n $(LRSNAMESPACE) + $(KUBECTL) get secrets -n $(PDBNAMESPACE) + +delsecrets: + $(call msg,"CLEAN OLD SECRETS") + $(eval SECRETSP:=$(shell kubectl get secrets -n $(PDBNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + $(eval SECRETSL:=$(shell kubectl get secrets -n $(LRSNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + @[ "${SECRETSP}" ] && ( \ + printf "Deleteing 
secrets in namespace -n $(PDBNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSP) -n $(PDBNAMESPACE))\ + || ( echo "No screts in namespace $(PDBNAMESPACE)") + @[ "${SECRETSL}" ] && ( \ + printf "Deleteing secrets in namespace -n $(LRSNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSL) -n $(LRSNAMESPACE))\ + || ( echo "No screts in namespace $(PDBNAMESPACE)") + +cleanCert: + $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl \ + $(DBUSERFILE) $(DBPASSFILE) $(WBUSERFILE) $(WBPASSFILE) $(PDBUSRFILE) $(PDBPWDFILE)\ + e_$(DBUSERFILE) e_$(DBPASSFILE) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + +### YAML FILE SECTION ### +define _opr +cp ${ORACLE_OPERATOR_YAML} . +export OPBASENAME=`basename ${ORACLE_OPERATOR_YAML}` +#export PDBNAMESPACE=$(cat ${PARAMETERS}|grep -v ^\#|grep PDBNAMESPACE|cut -d : -f 2) + +cp ${OPBASENAME} ${OPBASENAME}.ORIGNINAL +printf "\n\t\xF0\x9F\x91\x89 ${OPBASENAME}\n\n" +printf "\n\t\xF0\x9F\x91\x89 ${PDBNAMESPACE}\n\n" +sed -i 's/value: ""/value: ${OPRNAMESPACE},$PDBNAMESPACE,${LRSNAMESPACE}/g' ${OPBASENAME} +endef + +export opr = $(value _opr) + +operator: +# @ eval "$$opr" + $(CP) ${ORACLE_OPERATOR_YAML} . 
+ ${CP} `basename ${ORACLE_OPERATOR_YAML}` `basename ${ORACLE_OPERATOR_YAML}`.ORG + $(SED) -i 's/value: ""/value: $(OPRNAMESPACE),$(PDBNAMESPACE),$(LRSNAMESPACE)/g' `basename ${ORACLE_OPERATOR_YAML}` + + +define _script00 +cat < authsection.yaml + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + + +cat < ${PDBNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: ${PDBNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +cat < ${LRSNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: ${LRSNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +endef +export script00 = $(value _script00) +secyaml: + @ eval "$$script00" + + +#echo lrest pod creation +define _script01 +cat < ${LREST_POD} +apiVersion: database.oracle.com/${APIVERSION} +kind: LREST +metadata: + name: cdb-dev + namespace: ${LRSNAMESPACE} +spec: + cdbName: "DB12" + lrestImage: ${LRESTIMG} + 
lrestImagePullPolicy: "Always" + dbTnsurl : ${TNSALIAS} + replicas: 1 + deletePdbCascade: true + cdbAdminUser: + secret: + secretName: "dbuser" + key: "e_dbuser.txt" + cdbAdminPwd: + secret: + secretName: "dbpass" + key: "e_dbpass.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbPubKey: + secret: + secretName: "pubkey" + key: "publicKey" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + +endef +export script01 = $(value _script01) + + +define _script02 + +cat <${LRPDBCRE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat < ${LRPDBCRE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat <${LRPDBOPEN1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${LRPDBOPEN2} 
+apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${LRPDBOPEN3} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${LRPDBCLOSE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${LRPDBCLOSE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${LRPDBCLOSE3} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat < ${LRPDBCLONE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: 
"UNLIMITED" + tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" +EOF + +cat < ${LRPDBCLONE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb4 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" +EOF + +cat < ${LRPDBDELETE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${LRPDBDELETE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${LRPDBUNPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/var/tmp/pdb.$$.xml" + action: "Unplug" +EOF + +cat <${LRPDBPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/var/tmp/pdb.$$.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertiveLrpdbDeletion: true + pdbconfigmap: 
"config-map-pdb" + action: "Plug" +EOF + +cat <${LRPDBMAP1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + +cat <${LRPDBMAP2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +cat <${LRPDBMAP3} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + +cat <${CONFIG_MAP} +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-map-pdb + namespace: ${PDBNAMESPACE} +data: + rdbmsparameters.txt: | + session_cached_cursors;100;spfile + open_cursors;100;spfile + db_file_multiblock_read_count;16;spfile + test_invalid_parameter;16;spfile +EOF + + +cat < ${ALTERSYSTEMYAML} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Alter" + alterSystemParameter : "cpu_count" + alterSystemValue : "3" + parameterScope : "memory" + + +EOF + +## Auth information +for _file in ${LRPDBCRE1} ${LRPDBCRE2} ${LRPDBOPEN1} ${LRPDBOPEN2} ${LRPDBOPEN3} ${LRPDBCLOSE1} 
${LRPDBCLOSE2} ${LRPDBCLOSE3} ${LRPDBCLONE1} ${LRPDBCLONE2} ${LRPDBDELETE1} ${LRPDBDELETE2} ${LRPDBUNPLUG1} ${LRPDBPLUG1} ${LRPDBMAP1} ${LRPDBMAP2} ${LRPDBMAP3} ${ALTERSYSTEMYAML} +do +ls -ltr ${_file} + cat authsection.yaml >> ${_file} +done +rm authsection.yaml +endef + +export script02 = $(value _script02) + +genyaml: secyaml + @ eval "$$script01" + @ eval "$$script02" + +cleanyaml: + - $(RM) $(LRPDBMAP3) $(LRPDBMAP2) $(LRPDBMAP1) $(LRPDBPLUG1) $(LRPDBUNPLUG1) $(LRPDBDELETE2) $(LRPDBDELETE1) $(LRPDBCLONE2) $(LRPDBCLONE1) $(LRPDBCLOSE3) $(LRPDBCLOSE2) $(LRPDBCLOSE1) $(LRPDBOPEN3) $(LRPDBOPEN2) $(LRPDBOPEN1) $(LRPDBCRE2) $(LRPDBCRE1) $(LREST_POD) ${ALTERSYSTEMYAML} + - $(RM) ${CONFIG_MAP} ${PDBNAMESPACE}_binding.yaml ${LRSNAMESPACE}_binding.yaml + + + + +################# +### PACKAGING ### +################# + +pkg: + - $(RM) -rf /tmp/pkgtestplan + $(MKDIR) /tmp/pkgtestplan + $(CP) -R * /tmp/pkgtestplan + $(CP) ../../../../oracle-database-operator.yaml /tmp/pkgtestplan/ + $(TAR) -C /tmp -cvf ~/pkgtestplan_$(DATE).tar pkgtestplan + +################ +### diag ### +################ + +login: + $(KUBECTL) exec `$(KUBECTL) get pods -n $(LRSNAMESPACE)|grep rest|cut -d ' ' -f 1` -n $(LRSNAMESPACE) -it -- /bin/bash + + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + 
@$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + +####################################################### +#### TEST SECTION #### +####################################################### + +run00: + @$(call msg,"lrest pod creation") + - $(KUBECTL) delete lrest cdb-dev -n $(LRSNAMESPACE) + $(KUBECTL) apply -f $(LREST_POD) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrest cdb-dev -n $(LRSNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrest pod completed") + $(KUBECTL) get lrest -n $(LRSNAMESPACE) + $(KUBECTL) get pod -n $(LRSNAMESPACE) + +run01.1: + @$(call msg,"lrpdb pdb1 creation") + $(KUBECTL) apply -f $(LRPDBCRE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 creation completed") + $(KUBECTL) get lrpdb pdb1 -n $(PDBNAMESPACE) + +run01.2: + @$(call msg, "lrpdb pdb2 creation") + $(KUBECTL) apply -f $(LRPDBCRE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb2 creation completed") + $(KUBECTL) get lrpdb pdb2 -n $(PDBNAMESPACE) + +run02.1: + @$(call msg, "lrpdb pdb1 open") + $(KUBECTL) apply -f $(LRPDBOPEN1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 open completed") + 
$(KUBECTL) get lrpdb pdb1 -n $(PDBNAMESPACE) + +run02.2: + @$(call msg,"lrpdb pdb2 open") + $(KUBECTL) apply -f $(LRPDBOPEN2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" lrpdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb2 open completed") + $(KUBECTL) get lrpdb pdb2 -n $(PDBNAMESPACE) + + +run03.1: + @$(call msg,"clone pdb1-->pdb3") + $(KUBECTL) apply -f $(LRPDBCLONE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb3 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb1-->pdb3 completed") + $(KUBECTL) get lrpdb pdb3 -n $(PDBNAMESPACE) + + +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(LRPDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb4 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get lrpdb pdb3 -n $(PDBNAMESPACE) + + +run04.1: + @$(call msg,"lrpdb pdb1 close") + $(KUBECTL) apply -f $(LRPDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 close completed") + $(KUBECTL) get lrpdb pdb1 -n $(PDBNAMESPACE) + +run04.2: + @$(call msg,"lrpdb pdb2 close") + $(KUBECTL) apply -f $(LRPDBCLOSE2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" lrpdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb2 close completed") + $(KUBECTL) get lrpdb pdb2 -n $(PDBNAMESPACE) + +run05.1: + @$(call msg,"lrpdb pdb1 unplug") + $(KUBECTL) apply -f $(LRPDBUNPLUG1) + $(KUBECTL) wait --for=delete lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb1 unplug completed") + +run06.1: + @$(call msg, "lrpdb pdb1 plug") + $(KUBECTL) apply -f $(LRPDBPLUG1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 plug 
completed") + $(KUBECTL) get lrpdb pdb1 -n $(PDBNAMESPACE) + +run07.1: + @$(call msg,"lrpdb pdb1 delete ") + - $(KUBECTL) apply -f $(LRPDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) apply -f $(LRPDBDELETE1) + $(KUBECTL) wait --for=delete lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb1 delete") + $(KUBECTL) get lrpdb -n $(PDBNAMESPACE) + +run99.1: + $(KUBECTL) delete lrest cdb-dev -n $(LRSNAMESPACE) + $(KUBECTL) wait --for=delete lrest cdb-dev -n $(LRSNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) get lrest -n $(LRSNAMESPACE) + $(KUBECTL) get lrpdb -n $(PDBNAMESPACE) + +runall01: run00 run01.1 run01.2 run02.1 run02.2 run03.1 run03.2 run04.1 run04.2 run05.1 run06.1 run07.1 + + diff --git a/docs/multitenant/lrest-based/usecase/map_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/map_pdb1_resource.yaml new file mode 100644 index 00000000..2cd57b87 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/map_pdb1_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff 
--git a/docs/multitenant/lrest-based/usecase/map_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/map_pdb2_resource.yaml new file mode 100644 index 00000000..bab614cf --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/map_pdb2_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/map_pdb3_resource.yaml b/docs/multitenant/lrest-based/usecase/map_pdb3_resource.yaml new file mode 100644 index 00000000..7bbae48d --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/map_pdb3_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + 
secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/open_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/open_pdb1_resource.yaml new file mode 100644 index 00000000..a845a0bd --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/open_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/open_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/open_pdb2_resource.yaml new file mode 100644 index 00000000..9356184f --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/open_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: 
"READ WRITE" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/open_pdb3_resource.yaml b/docs/multitenant/lrest-based/usecase/open_pdb3_resource.yaml new file mode 100644 index 00000000..1b8024ba --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/open_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/parameters.txt b/docs/multitenant/lrest-based/usecase/parameters.txt new file mode 100644 index 00000000..1f21ed38 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/parameters.txt @@ -0,0 +1,52 @@ + +######################## +## REST SERVER IMAGE ### 
+######################## + +LRESTIMG:container-registry.oracle.com/database/operator:lrest-241210-amd64 + +############################## +## TNS URL FOR CDB CREATION ## +############################## +TNSALIAS:"(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + +########################################### +## CDB USER FOR PDB LIFECYCLE MANAGMENT ### +########################################### + +DBUSER:restdba +DBPASS:CLWKO655321 + +####################### +## HTTPS CREDENTIAL ### +####################### + +WBUSER:welcome +WBPASS:welcome1 + +##################### +## PDB ADMIN USER ### +##################### + +PDBUSR:Citizenkane +PDBPWD:Rosebud + +################### +### NAMESPACES #### +################### + +PDBNAMESPACE:pdbnamespace +LRSNAMESPACE:cdbnamespace + + +#################### +### COMPANY NAME ### +#################### + +COMPANY:oracle + +#################### +### APIVERSION ### +#################### + +APIVERSION:v4 diff --git a/docs/multitenant/lrest-based/usecase/pdbnamespace_binding.yaml b/docs/multitenant/lrest-based/usecase/pdbnamespace_binding.yaml new file mode 100644 index 00000000..5af79ed6 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/pdbnamespace_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: pdbnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/lrest-based/usecase/plug_pdb1_resource.yaml 
b/docs/multitenant/lrest-based/usecase/plug_pdb1_resource.yaml new file mode 100644 index 00000000..d7d310db --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/plug_pdb1_resource.yaml @@ -0,0 +1,54 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertiveLrpdbDeletion: true + pdbconfigmap: "config-map-pdb" + action: "Plug" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/unplug_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/unplug_pdb1_resource.yaml new file mode 100644 index 00000000..a5da5a57 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/unplug_pdb1_resource.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + 
lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/NamespaceSeg.md b/docs/multitenant/ords-based/NamespaceSeg.md new file mode 100644 index 00000000..6738fe56 --- /dev/null +++ b/docs/multitenant/ords-based/NamespaceSeg.md @@ -0,0 +1,14 @@ + + +# Namespace segregation + +With the namespace segregation pdb controller and cdb controller run in different namespaces. The new functionality introduces a new parameter (the cdb namespace) in pdb crd definition. In case you don't need the namespace segregation you have to sepcify the namespace name that you are using for yours crd and pods anyway. Refer to usercase01 and usecase02 to see single namespace configuration. Refer to usecase03 to see examples of namespace segregation. + +# Secrets + +In order to use multiple namespace we need to create approriate secrets in each namespace. Tls certificate secrets must be created in all namespaces (db-ca db-tls). 
+ +![general_schema](./images/K8S_NAMESPACE_SEG.png) + + + diff --git a/docs/multitenant/ords-based/README.md b/docs/multitenant/ords-based/README.md new file mode 100644 index 00000000..edfd0208 --- /dev/null +++ b/docs/multitenant/ords-based/README.md @@ -0,0 +1,411 @@ + + +# Oracle Multitenant Database Controllers + +The Oracle Database Operator for Kubernetes uses two controllers to manage the [Pluggable Database lifecycle][oradocpdb] + +- CDB controller +- PDB controller + +By using CDB/PDB controllers, you can perform the following actions **CREATE**, **MODIFY(OPEN/COSE)**, **DELETE**, **CLONE**, **PLUG** and **UNPLUG** against pluggable database + +Examples are located under the following directories: + +- the directories [`Usecase`](./usecase/) and [`usecase01`](./usecase01/) contain a [configuration file](./usecase/parameters.txt) where you can specify all the details of your environment. A [`makefile`](./usecase/makefile) takes this file as input to generate all of the `yaml` files. There is no need to edit `yaml` files one by one. +- [Singlenamespace provisioning](./provisioning/singlenamespace/) This file contains base example files that you can use to manage the PDB and CDB within a single namespace. +- [Multinamespace provisioning](./provisioning/multinamespace/) This file contains base example files that you can use to manage the PDB and CDB in different namespaces. +- [Usecase01](./usecase01/README.md) [Usecase02](./usecase02/README.md) This file contains other step-by-step examples; + +Automatic `yaml` generation is not available for the directory `usecase02` and provisioning directories. + +**NOTE** the CDB controller is not intended to manage the container database. The CDB controller is meant to provide a pod with a REST server connected to the container database that you can use to manage PDBs. 
+ + +## Macro steps for setup + +- Deploy the Oracle Database Operator (operator, or `OraOperator`) +- [Create Ords based image for CDB pod](./provisioning/ords_image.md) +- [Container RDBMB user creation](#prepare-the-container-database-for-pdb-lifecycle-management-pdb-lm) +- Create certificates for https connection +- Create secrets for credentials and certificates +- Create CDB pod using the Ords based image + +## Oracle DB Operator Multitenant Database Controller Deployment + +To deploy `OraOperator`, use this [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) step-by-step procedure. + +After the **Oracle Database Operator** is deployed, you can see the Oracle Database (DB) Operator Pods running in the Kubernetes Cluster. The multitenant controllers are deployed as part of the `OraOperator` deployment. You can see the CRDs (Custom Resource Definition) for the CDB and PDBs in the list of CRDs. The following output is an example of such a deployment: + +```bash +[root@test-server oracle-database-operator]# kubectl get ns +NAME STATUS AGE +cert-manager Active 32h +default Active 245d +kube-node-lease Active 245d +kube-public Active 245d +kube-system Active 245d +oracle-database-operator-system Active 24h <---- namespace to deploy the Oracle Database Operator + +[root@test-server oracle-database-operator]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 0 28s +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 0 28s +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 0 28s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 29s +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 29s + +NAME READY UP-TO-DATE AVAILABLE 
AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 29s + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 29s +[root@docker-test-server oracle-database-operator]# + +[root@test-server oracle-database-operator]# kubectl get crd +NAME CREATED AT +autonomouscontainerdatabases.database.oracle.com 2022-06-22T01:21:36Z +autonomousdatabasebackups.database.oracle.com 2022-06-22T01:21:36Z +autonomousdatabaserestores.database.oracle.com 2022-06-22T01:21:37Z +autonomousdatabases.database.oracle.com 2022-06-22T01:21:37Z +cdbs.database.oracle.com 2022-06-22T01:21:37Z <---- +certificaterequests.cert-manager.io 2022-06-21T17:03:46Z +certificates.cert-manager.io 2022-06-21T17:03:47Z +challenges.acme.cert-manager.io 2022-06-21T17:03:47Z +clusterissuers.cert-manager.io 2022-06-21T17:03:48Z +dbcssystems.database.oracle.com 2022-06-22T01:21:38Z +issuers.cert-manager.io 2022-06-21T17:03:49Z +oraclerestdataservices.database.oracle.com 2022-06-22T01:21:38Z +orders.acme.cert-manager.io 2022-06-21T17:03:49Z +pdbs.database.oracle.com 2022-06-22T01:21:39Z <--- +shardingdatabases.database.oracle.com 2022-06-22T01:21:39Z +singleinstancedatabases.database.oracle.com 2022-06-22T01:21:40Z +``` + + +## Prerequisites to manage PDB Life Cycle using Oracle DB Operator Multitenant Database Controller + +* [Prepare the container database (CDB) for PDB Lifecycle Management or PDB-LM](#prepare-cdb-for-pdb-lifecycle-management-pdb-lm) +* [Oracle REST Data Service or ORDS Image](#oracle-rest-data-service-ords-image) +* [Kubernetes Secrets](#kubernetes-secrets) +* [Kubernetes CRD for CDB](#cdb-crd) +* [Kubernetes CRD for PDB](#pdb-crd) + +## Prepare the container database for PDB Lifecycle Management (PDB-LM) + +Pluggable Database (PDB) management operations are performed in the Container Database (CDB). These operations include **create**, **clone**, **plug**, **unplug**, **delete**, **modify** and **map pdb**. 
+ +To perform PDB lifecycle management operations, you must first use the following steps to define the default CDB administrator credentials on target CDBs: + +Create the CDB administrator user and grant the required privileges. In this example, the user is `C##DBAPI_CDB_ADMIN`. However, any suitable common username can be used. + +```SQL +SQL> conn /as sysdba + +-- Create following users at the database level: + +ALTER SESSION SET "_oracle_script"=true; +DROP USER C##DBAPI_CDB_ADMIN cascade; +CREATE USER C##DBAPI_CDB_ADMIN IDENTIFIED BY CONTAINER=ALL ACCOUNT UNLOCK; +GRANT SYSOPER TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; +GRANT SYSDBA TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; +GRANT CREATE SESSION TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; + + +-- Verify the account status of the following usernames. They should not be in locked status: + +col username for a30 +col account_status for a30 +select username, account_status from dba_users where username in ('ORDS_PUBLIC_USER','C##DBAPI_CDB_ADMIN','APEX_PUBLIC_USER','APEX_REST_PUBLIC_USER'); +``` + +## OCI OKE (Kubernetes Cluster) + +You can use an [OKE in Oracle Cloud Infrastructure][okelink] to configure the controllers for PDB lifecycle management. **Note that there is no restriction about container database location; it can be anywhere (on Cloud or on-premises).** +To quickly create an OKE cluster in your OCI cloud environment you can use the following [link](./provisioning/quickOKEcreation.md). +In this setup example [provisioning example setup](./provisioning/example_setup_using_oci_oke_cluster.md), the Container Database is running on an OCI Exadata Database Cluster. + + +## Oracle REST Data Service (ORDS) Image + +The PDB Database controllers require a pod running a dedicated REST server image based on [ORDS][ordsdoc]. Read the following [document on ORDS images](./provisioning/ords_image.md) to build the ORDS images. 
+ + +## Kubernetes Secrets + + Multitenant Controllers use Kubernetes Secrets to store the required credential and HTTPS certificates. + + **Note** In multi-namespace environments you must create specific Secrets for each namespaces. + +### Secrets for CERTIFICATES + +Create the certificates and key on your local host, and then use them to create the Kubernetes Secret. + +```bash +openssl genrsa -out ca.key 2048 +openssl req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords /CN=localhost Root CA " -out ca.crt +openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords /CN=localhost" -out server.csr +echo "subjectAltName=DNS:cdb-dev-ords,DNS:www.example.com" > extfile.txt +openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt +``` + +```bash +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n oracle-database-operator-system +kubectl create secret generic db-ca --from-file=ca.crt -n oracle-database-operator-system +``` + +image_not_found + +**Note:** Remove temporary files after successfful Secret creation. + +### Secrets for CDB CRD + + **Note:** base64 encoded secrets are no longer supported; use OpenSSL secrets as documented in the following section. After successful creation of the CDB Resource, the CDB and PDB Secrets can be deleted from the Kubernetes system. Don't leave plaintext files containing sensitive data on disk. After loading the Secret, remove the plaintext file or move it to secure storage. 
+ + ```bash + +export PRVKEY=ca.key +export PUBKEY=public.pem +WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt +CDBUSRFILE=cdbusr.txt +CDBPWDFILE=cdbpwd.txt +SYSPWDFILE=syspwd.txt +ORDPWDFILE=ordpwd.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + +# Webuser credential +echo [WBUSER] > ${WBUSERFILE} +echo [WBPASS] > ${WBPASSFILE} + +# CDB admin user credentioan +echo [CDBPWD] > ${CDBPWDFILE} +echo [CDBUSR] > ${CDBUSRFILE} + +# SYS Password +echo [SYSPWD] > ${SYSPWDFILE} + +# Ords Password +echo [ORDPWD] > ${ORDPWDFILE} + +## PDB admin credential +echo [PDBUSR] > ${PDBUSRFILE} +echo [PDBPWD] > ${PDBPWDFILE} + +#Secrets creation for pub and priv keys +openssl rsa -in ${PRVKEY} -outform PEM -pubout -out ${PUBKEY} +kubectl create secret generic pubkey --from-file=publicKey=${PUBKEY} -n ${CDBNAMESPACE} +kubectl create secret generic prvkey --from-file=privateKey=${PRVKEY} -n ${CDBNAMESPACE} +kubectl create secret generic prvkey --from-file=privateKey="${PRVKEY}" -n ${PDBNAMESPACE} + +#Password encryption +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${WBUSERFILE} |base64 > e_${WBUSERFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${WBPASSFILE} |base64 > e_${WBPASSFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${CDBPWDFILE} |base64 > e_${CDBPWDFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${CDBUSRFILE} |base64 > e_${CDBUSRFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${SYSPWDFILE} |base64 > e_${SYSPWDFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${ORDPWDFILE} |base64 > e_${ORDPWDFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${PDBUSRFILE} |base64 > e_${PDBUSRFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${PDBPWDFILE} |base64 > e_${PDBPWDFILE} + +#Ecrypted secrets creation +kubectl create secret generic wbuser --from-file=e_${WBUSERFILE} -n ${CDBNAMESPACE} +kubectl create secret generic wbpass --from-file=e_${WBPASSFILE} -n ${CDBNAMESPACE} +kubectl create secret generic wbuser 
--from-file=e_${WBUSERFILE} -n ${PDBNAMESPACE} +kubectl create secret generic wbpass --from-file=e_${WBPASSFILE} -n ${PDBNAMESPACE} +kubectl create secret generic cdbpwd --from-file=e_${CDBPWDFILE} -n ${CDBNAMESPACE} +kubectl create secret generic cdbusr --from-file=e_${CDBUSRFILE} -n ${CDBNAMESPACE} +kubectl create secret generic syspwd --from-file=e_${SYSPWDFILE} -n ${CDBNAMESPACE} +kubectl create secret generic ordpwd --from-file=e_${ORDPWDFILE} -n ${CDBNAMESPACE} +kubectl create secret generic pdbusr --from-file=e_${PDBUSRFILE} -n ${PDBNAMESPACE} +kubectl create secret generic pdbpwd --from-file=e_${PDBPWDFILE} -n ${PDBNAMESPACE} + +#Get rid of the swap files +rm ${WBUSERFILE} ${WBPASSFILE} ${CDBPWDFILE} ${CDBUSRFILE} \ + ${SYSPWDFILE} ${ORDPWDFILE} ${PDBUSRFILE} ${PDBPWDFILE} \ + e_${WBUSERFILE} e_${WBPASSFILE} e_${CDBPWDFILE} e_${CDBUSRFILE} \ + e_${SYSPWDFILE} e_${ORDPWDFILE} e_${PDBUSRFILE} e_${PDBPWDFILE} +``` + +Check Secrets details + +```bash +kubectl describe secrets syspwd -n cdbnamespace +Name: syspwd +Namespace: cdbnamespace +Labels: +Annotations: + +Type: Opaque + +Data +==== +e_syspwd.txt: 349 bytes +``` +Example of `yaml` file Secret section: + +```yaml +[...] + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" +[...] +``` + +## CDB CRD + +The Oracle Database Operator Multitenant Controller creates the CDB as a custom resource object kind that models a target CDB as a native Kubernetes object. This object kind is used only to create Pods to connect to the target CDB to perform PDB-LM operations. 
Each CDB resource follows the CDB CRD as defined here: [`config/crd/bases/database.oracle.com_cdbs.yaml`](../../../config/crd/bases/database.oracle.com_cdbs.yaml) + +To create a CDB CRD, use this example`.yaml` file: [`cdb_create.yaml`](../multitenant/provisioning/singlenamespace/cdb_create.yaml) + +**Note:** The password and username fields in this *cdb.yaml* Yaml are the Kubernetes Secrets created earlier in this procedure. For more information, see the section [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/). To understand more about creating secrets for pulling images from a Docker private registry, see [Kubernetes Private Registry Documenation]( https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). + +Create a CDB CRD Resource example + +```bash +kubectl apply -f cdb_create.yaml +``` + +see [usecase01][uc01] and usecase02[uc02] for more information about file configuration + +## PDB CRD + +The Oracle Database Operator Multitenant Controller creates the PDB object kind as a custom resource that models a PDB as a native Kubernetes object. There is a one-to-one mapping between the actual PDB and the Kubernetes PDB Custom Resource. You cannot have more than one Kubernetes resource for a target PDB. This PDB resource can be used to perform PDB-LM operations by specifying the action attribute in the PDB Specs. 
Each PDB resource follows the PDB CRD as defined here: [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) + +Yaml file [pdb_create.yaml](../multitenant/provisioning/singlenamespace/pdb_create.yaml) to create a pdb + +```bash +kubectl apply -f pdb_create.yaml +``` + +## CRD TABLE PARAMETERS + +| yaml file parameters | value | description /ords parameter | CRD | +|------------------ |--------------------------- |-------------------------------------------------------------------------------|-----------| +| dbserver | or | [--db-hostname][1] | CDB | +| dbTnsurl | | [--db-custom-url/db.customURL][dbtnsurl] | CDB | +| port | | [--db-port][2] | CDB | +| cdbName | | Container Name | CDB | +| name | | ORDS podname prefix in `cdb.yaml` | CDB | +| name | | Pdb resource in `pdb.yaml` | PDB | +| ordsImage | ords-dboper:latest | ORDS pod public container registry | CDB | +| pdbName | | Pluggable database (PDB) name | Container database (CDB) | +| servicename | | [--db-servicename][3] | CDB | +| sysadmin_user | | [--admin-user][adminuser] | CDB | +| sysadmin_pwd | | [--password-stdin][pwdstdin] | CDB | +| cdbadmin_user | | [db.cdb.adminUser][1] | CDB | +| cdbadmin_pwd | | [db.cdb.adminUser.password][cdbadminpwd] | CDB | +| webserver_user | | [https user][http] NOT A DB USER | CDB PDB | +| webserver_pwd | | [http user password][http] | CDB PDB | +| ords_pwd | | [ORDS_PUBLIC_USER password][public_user] | CDB | +| pdbTlsKey | | [standalone.https.cert.key][key] | PDB | +| pdbTlsCrt | | [standalone.https.cert][cr] | PDB | +| pdbTlsCat | | certificate authority | PDB | +| cdbTlsKey | | [standalone.https.cert.key][key] | CDB | +| cdbTlsCrt | | [standalone.https.cert][cr] | CDB | +| cdbTlsCat | | Certificate authority | CDB | +| cdbOrdsPrvKey | | Private key | CDB | +| pdbOrdsPrvKey | | Private key | PDB | +| xmlFileName | | Path for the unplug and plug operation | PDB | +| srcPdbName | | Name of the database that you want to be 
cloned | PDB | +| action | | Create open close delete clone plug unplug and map | PDB | +| deletePdbCascade | boolean | Delete PDBs cascade during CDB deletion | CDB | +| assertivePdbDeletion | boolean | Deleting the PDB crd means deleting the PDB as well | PDB | +| fileNameConversions | | Used for database cloning | PDB | +| totalSize | | `dbsize` | PDB | +| pdbState | | Change PDB state | PDB | +| modifyOption | | To be used along with `pdbState` | PDB | +| dropAction | | Delete datafiles during PDB deletion | PDB | +| sourceFileNameConversions | | [sourceFileNameConversions(optional): string][4] | PDB | +| tdeKeystorePath | | [tdeKeystorePath][tdeKeystorePath] | N/A | +| tdeExport | | [tdeExport] | N/A ] +| tdeSecret | | [tdeSecret][tdeSecret] | N/A | +| tdePassword | | [tdeSecret][tdeSecret] | N/A | + + + + +## Usecases files list + +### Single Namespace + +1. [Create CDB](./provisioning/singlenamespace/cdb_create.yaml) +2. [Create PDB](./provisioning/singlenamespace/pdb_create.yaml) +3. [Clone PDB](./provisioning/singlenamespace/pdb_clone.yaml) +4. [Open PDB](./provisioning/singlenamespace/pdb_open.yaml) +4. [Close PDB](./provisioning/singlenamespace/pdb_close.yaml) +5. [Delete PDB](./provisioning/singlenamespace/pdb_delete.yaml) +6. [Unplug PDB](./provisioning/singlenamespace/pdb_unplug.yaml) +7. [Plug PDB](./provisioning/singlenamespace/pdb_plug.yaml) + +### Multiple namespace (cdbnamespace,dbnamespace) + +1. [Create CDB](./provisioning/multinamespace/cdb_create.yaml) +2. [Create PDB](./provisioning/multinamespace/pdb_create.yaml) +3. [Clone PDB](./provisioning/multinamespace/pdb_clone.yaml) +4. [Open PDB](./provisioning/multinamespace/pdb_open.yaml) +4. [Close PDB](./provisioning/multinamespace/pdb_close.yaml) +5. [Delete PDB](./provisioning/multinamespace/pdb_delete.yaml) +6. 
[Unplug PDB](./provisioning/multinamespace/pdb_unplug.yaml) + +## Known issues + + - ORDS installation fails if the pluggable databases in the container database are not open + + - Version 1.1.0: the encoded password for HTTPS authentication may include a carriage return; as a consequence, the HTTPS request fails with an HTTP 404 error. Workaround: generate the encoded password using **printf** instead of **echo**. + + - PDB controller authentication suddenly fails without any system change. Check the certificate expiration date: **openssl .... -days 365** + + - Nothing happens after applying the cdb yaml files: make sure that the WATCH_NAMESPACE list is properly configured in the operator yaml file + + [okelink]:https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm + + [ordsdoc]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/23.1/index.html + + [uc01]:../multitenant/usecase01/README.md + + [uc02]:../multitenant/usecase02/README.md + + [oradocpdb]:https://docs.oracle.com/en/database/oracle/oracle-database/21/multi/introduction-to-the-multitenant-architecture.html#GUID-AB84D6C9-4BBE-4D36-992F-2BB85739329F + + [1]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation + + [2]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation + + [3]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-DAA027FA-A4A6-43E1-B8DD-C92B330C2341:~:text=%2D%2Ddb%2Dservicename%20%3Cstring%3E + + 
[4]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.3/orrst/op-database-pdbs-post.html + +[adminuser]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22:~:text=Table%202%2D6%20Command%20Options%20for%20Uninstall%20CLI + +[public_user]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/using-multitenant-architecture-oracle-rest-data-services.html#GUID-E64A141A-A71F-4979-8D33-C5F8496D3C19:~:text=Preinstallation%20Tasks%20for%20Oracle%20REST%20Data%20Services%20CDB%20Installation + +[key]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=standalone.https.cert.key + +[cr]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0 + +[cdbadminpwd]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=Table%20C%2D1%20Oracle%20REST%20Data%20Services%20Configuration%20Settings + + +[pwdstdin]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-88479C84-CAC1-4133-A33E-7995A645EC05:~:text=default%20database%20pool.-,2.1.4.1%20Understanding%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation,-Table%202%2D2 + +[http]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-BEECC057-A8F5-4EAB-B88E-9828C2809CD8:~:text=Example%3A%20delete%20%5B%2D%2Dglobal%5D-,user%20add,-Add%20a%20user + 
+[dbtnsurl]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22 + +[tdeKeystorePath]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/21.4/orrst/op-database-pdbs-pdb_name-post.html + +[tdeSecret]:https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/ADMINISTER-KEY-MANAGEMENT.html#GUID-E5B2746F-19DC-4E94-83EC-A6A5C84A3EA9 +~ + + + + + diff --git a/docs/multitenant/ords-based/images/K8S_NAMESPACE_SEG.png b/docs/multitenant/ords-based/images/K8S_NAMESPACE_SEG.png new file mode 100644 index 00000000..594471ed Binary files /dev/null and b/docs/multitenant/ords-based/images/K8S_NAMESPACE_SEG.png differ diff --git a/docs/multitenant/ords-based/images/K8S_SECURE1.png b/docs/multitenant/ords-based/images/K8S_SECURE1.png new file mode 100644 index 00000000..292c9335 Binary files /dev/null and b/docs/multitenant/ords-based/images/K8S_SECURE1.png differ diff --git a/docs/multitenant/ords-based/images/K8S_SECURE2.png b/docs/multitenant/ords-based/images/K8S_SECURE2.png new file mode 100644 index 00000000..b9713d7c Binary files /dev/null and b/docs/multitenant/ords-based/images/K8S_SECURE2.png differ diff --git a/docs/multitenant/ords-based/images/K8S_SECURE3.png b/docs/multitenant/ords-based/images/K8S_SECURE3.png new file mode 100644 index 00000000..b70123c1 Binary files /dev/null and b/docs/multitenant/ords-based/images/K8S_SECURE3.png differ diff --git a/docs/multitenant/ords-based/images/K8S_SECURE4.png b/docs/multitenant/ords-based/images/K8S_SECURE4.png new file mode 100644 index 00000000..860144e7 Binary files /dev/null and b/docs/multitenant/ords-based/images/K8S_SECURE4.png differ diff --git a/docs/multitenant/ords-based/images/makerunall.png b/docs/multitenant/ords-based/images/makerunall.png new file mode 100644 index 00000000..ab856f90 Binary files /dev/null and 
b/docs/multitenant/ords-based/images/makerunall.png differ diff --git a/docs/multitenant/ords-based/images/makesecrets_1_1.png b/docs/multitenant/ords-based/images/makesecrets_1_1.png new file mode 100644 index 00000000..f0f6f215 Binary files /dev/null and b/docs/multitenant/ords-based/images/makesecrets_1_1.png differ diff --git a/docs/multitenant/ords-based/openssl_schema.jpg b/docs/multitenant/ords-based/openssl_schema.jpg new file mode 100644 index 00000000..4453d52f Binary files /dev/null and b/docs/multitenant/ords-based/openssl_schema.jpg differ diff --git a/docs/multitenant/ords-based/provisioning/example_setup_using_oci_oke_cluster.md b/docs/multitenant/ords-based/provisioning/example_setup_using_oci_oke_cluster.md new file mode 100644 index 00000000..d56efacb --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/example_setup_using_oci_oke_cluster.md @@ -0,0 +1,38 @@ +# Example of a working setup using OCI OKE(Kubernetes Cluster) and a CDB in Cloud (OCI Exadata Database Cluster) + +In this example, the target CDB (for which the PDB life cycle management is needed) is running in a Cloud environment (OCI's [Oracle Exadata Database Service](https://docs.oracle.com/en-us/iaas/exadatacloud/index.html)) and to manage its PDBs, the Oracle DB Operator is running on a Kubernetes Cluster running in cloud (OCI's [Container Engine for Kubernetes or OKE](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm#Overview_of_Container_Engine_for_Kubernetes)). + + +## High Level plans for this setup + +Below are the main steps that will be involved in this setup: + +- Setup VCN, Add security lists +- Setup OKE cluster with custom settings +- Install Oracle Database Operator on OKE Cluster +- Install ords controller definition +- Manager pdb life cycle management. 
+ + +## OKE Cluster + +Check the [Oracle Documentation](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengnetworkconfigexample.htm#example-privatek8sapi-privateworkers-publiclb) for the OKE rules settings. + +Create OKE cluster with CUSTOM option to use same VCN where ExaCS is provisioned. + +**NOTE:** Make sure you choose same VCN exaphxvcn where ExaCS is provisioned. + +After this, setup kubeconfig & validate cluster access as well as worker node access via ssh. + +For example, you should be able to check the available OKE nodes using "kubectl" as below: + +``` +% kubectl get nodes -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +192.168.194.163 Ready node 3d19h v1.23.4 192.168.194.163 XX.XX.XX.XX Oracle Linux Server 7.9 5.4.17-2136.306.1.3.el7uek.x86_64 cri-o://1.23.2 +192.168.194.169 Ready node 3d19h v1.23.4 192.168.194.169 XX.XX.XX.XX Oracle Linux Server 7.9 5.4.17-2136.306.1.3.el7uek.x86_64 cri-o://1.23.2 +192.168.194.241 Ready node 3d19h v1.23.4 192.168.194.241 XX.XX.XX.XX Oracle Linux Server 7.9 5.4.17-2136.306.1.3.el7uek.x86_64 cri-o://1.23.2 +``` + +Once this setup is ready, you can proceed with the installation of [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) to use the Oracle On-prem controller to manage PDBs in this CDB. + diff --git a/docs/multitenant/ords-based/provisioning/multinamespace/cdb_create.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/cdb_create.yaml new file mode 100644 index 00000000..8ace42e8 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/cdb_create.yaml @@ -0,0 +1,48 @@ +apiVersion: database.oracle.com/v4 +kind: CDB +metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" + ordsImagePullPolicy: "Always" + dbTnsurl : "...Container tns alias....." 
+ replicas: 1 + sysAdminPwd: + secret: + secretName: "[...]" + key: "[...]" + ordsPwd: + secret: + secretName: "[...]" + key: "[...]" + cdbAdminUser: + secret: + secretName: "[...]" + key: "[...]" + cdbAdminPwd: + secret: + secretName: "[...]" + key: "[...]" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/ords-based/provisioning/multinamespace/pdb_clone.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_clone.yaml new file mode 100644 index 00000000..4dac1aea --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_clone.yaml @@ -0,0 +1,54 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdb2_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + action: "Clone" diff --git a/docs/multitenant/ords-based/provisioning/multinamespace/pdb_close.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_close.yaml new file mode 100644 index 00000000..44b1a086 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_close.yaml @@ -0,0 +1,48 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + diff --git 
a/docs/multitenant/ords-based/provisioning/multinamespace/pdb_create.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_create.yaml new file mode 100644 index 00000000..2bf2189b --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_create.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + fileNameConversions: "NONE" + tdeImport: false + totalSize: "1G" + tempSize: "100M" + action: "Create" + diff --git a/docs/multitenant/ords-based/provisioning/multinamespace/pdb_delete.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_delete.yaml new file mode 100644 index 00000000..296c9feb --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_delete.yaml @@ -0,0 +1,39 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + 
key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + + diff --git a/docs/multitenant/ords-based/provisioning/multinamespace/pdb_open.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_open.yaml new file mode 100644 index 00000000..9f85f0b5 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_open.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" diff --git a/docs/multitenant/ords-based/provisioning/multinamespace/pdb_plug.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_plug.yaml new file mode 100644 index 00000000..10719ccc --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_plug.yaml @@ -0,0 +1,51 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + action: "Plug" + assertivePdbDeletion: true + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + + + diff --git a/docs/multitenant/ords-based/provisioning/multinamespace/pdb_unplug.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_unplug.yaml new file mode 100644 index 00000000..f30f2699 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_unplug.yaml @@ -0,0 +1,43 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/ords-based/provisioning/ords_image.md b/docs/multitenant/ords-based/provisioning/ords_image.md new file mode 100644 index 00000000..e2d1dcef --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/ords_image.md @@ -0,0 +1,81 @@ + + +# Build ORDS Docker Image + +This file contains the steps to create an ORDS based image to be used solely by the PDB life cycle multitentant controllers. + +**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. + +#### Clone the software using git: + +> Under directory ./oracle-database-operator/ords you will find the [Dockerfile](../../../ords/Dockerfile) and [runOrdsSSL.sh](../../../ords/runOrdsSSL.sh) required to build the image. + +```sh + git clone git@orahub.oci.oraclecorp.com:rac-docker-dev/oracle-database-operator.git + cd oracle-database-operator/ords/ +``` + +#### Login to the registry: container-registry.oracle.com + +**NOTE:** To login to this registry, you will need to the URL https://container-registry.oracle.com , Sign in, then click on "Java" and then accept the agreement. 
 + +```bash +docker login container-registry.oracle.com +``` + +#### Login to your container registry + +Log in to the repository where you want to push your docker image (if needed) to pull it during deployment in your environment. + +```bash +docker login +``` + +#### Build the image + +Build the docker image by using the command below: + +```bash +docker build -t oracle/ords-dboper:latest . +``` +> If you are working behind a proxy, remember to specify https_proxy and http_proxy during image creation + +Check the docker image details using: + +```bash +docker images +``` + +> OUTPUT EXAMPLE +```bash +REPOSITORY TAG IMAGE ID CREATED SIZE +oracle/ords-dboper latest fdb17aa242f8 4 hours ago 1.46GB + +``` + +#### Tag and push the image + +Tag and push the image to your image repository. + +NOTE: We have the repo as `phx.ocir.io//oracle/ords:latest`. Please change as per your environment. + +```bash +docker tag oracle/ords-dboper:latest phx.ocir.io//oracle/ords:latest +docker push phx.ocir.io//oracle/ords:latest +``` + +#### In case of private image + +If the image is not public, then you need to create a secret containing the password of your image repository. 
+Create a Kubernetes Secret for your docker repository to pull the image during deployment by using the command below: + +```bash +kubectl create secret generic container-registry-secret --from-file=.dockerconfigjson=./.docker/config.json --type=kubernetes.io/dockerconfigjson -n oracle-database-operator-system +``` + +Use the parameter `ordsImagePullSecret` to specify the container secrets in the pod creation yaml file. + +#### [Image creation example](../usecase01/logfiles/BuildImage.log) + + + diff --git a/docs/multitenant/ords-based/provisioning/quickOKEcreation.md b/docs/multitenant/ords-based/provisioning/quickOKEcreation.md new file mode 100644 index 00000000..19d9323e --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/quickOKEcreation.md @@ -0,0 +1,136 @@ + + +### Quick OKE creation script + +Use this script to quickly create an OKE cluster in your OCI. + +#### Prerequisites: +- ocicli is properly configured on your client +- make is installed on your client +- vnc is already configured +- ssh key is configured (public key available under directory ~/.ssh) +- edit make providing all the information about your compartment, vnc, subnet, lb subnet and nd subnet (exported variables in the header section) + + +#### Execution: + +```bash +make all +``` + +Monitor the OKE from the OCI console + +#### Makefile +```makefile +.EXPORT_ALL_VARIABLES: + +export CMPID=[.... COMPARTMENT ID.............] +export VNCID=[.... VNC ID ....................] +export ENDID=[.... SUBNET END POINT ID .......] +export LBSID=[.....LB SUBNET ID...............] +export NDSID=[.....NODE SUBNET ID.............] 
+ + +#ssh public key +export KEYFL=~/.ssh/id_rsa.pub + +#cluster version +export KSVER=v1.27.2 + +#cluster name +export CLUNM=myoke + +#pool name +export PLNAM=Pool1 + +#logfile +export LOGFILE=./clustoke.log + +#shape +export SHAPE=VM.Standard.E4.Flex + +OCI=/home/oracle/bin/oci +CUT=/usr/bin/cut +KUBECTL=/usr/bin/kubectl +CAT=/usr/bin/cat + +all: cluster waitcluster pool waitpool config desccluster + +cluster: + @echo " - CREATING CLUSTER " + @$(OCI) ce cluster create \ + --compartment-id $(CMPID) \ + --kubernetes-version $(KSVER) \ + --name $(CLUNM) \ + --vcn-id $(VNCID) \ + --endpoint-subnet-id $(ENDID) \ + --service-lb-subnet-ids '["'$(LBSID)'"]' \ + --endpoint-public-ip-enabled true \ + --persistent-volume-freeform-tags '{"$(CLUNM)" : "OKE"}' 1>$(LOGFILE) 2>&1 + +waitcluster: + @while [ `$(OCI) ce cluster list --compartment-id $(CMPID) \ + --name $(CLUNM) --lifecycle-state ACTIVE --query data[0].id \ + --raw-output |wc -l ` -eq 0 ] ; do sleep 5 ; done + @echo " - CLUSTER CREATED" + + +pool: + @echo " - CREATING POOL" + @$(eval PBKEY :=$(shell $(CAT) $(KEYFL)|grep -v " PUBLIC KEY")) + @$(OCI) ce node-pool create \ + --cluster-id `$(OCI) ce cluster list --compartment-id $(CMPID) \ + --name $(CLUNM) --lifecycle-state ACTIVE --query data[0].id --raw-output` \ + --compartment-id $(CMPID) \ + --kubernetes-version $(KSVER) \ + --name $(PLNAM) \ + --node-shape $(SHAPE) \ + --node-shape-config '{"memoryInGBs": 8.0, "ocpus": 1.0}' \ + --node-image-id `$(OCI) compute image list \ + --operating-system 'Oracle Linux' --operating-system-version 7.9 \ + --sort-by TIMECREATED --compartment-id $(CMPID) --shape $(SHAPE) \ + --query data[1].id --raw-output` \ + --node-boot-volume-size-in-gbs 50 \ + --ssh-public-key "$(PBKEY)" \ + --size 3 \ + --placement-configs '[{"availabilityDomain": "'`oci iam availability-domain list \ + --compartment-id $(CMPID) \ + --query data[0].name --raw-output`'", "subnetId": "'$(NDSID)'"}]' 1>>$(LOGFILE) 2>&1 + +waitpool: + $(eval CLSID :=$(shell 
$(OCI) ce cluster list --compartment-id $(CMPID) \ + --name $(CLUNM) --lifecycle-state ACTIVE --query data[0].id --raw-output)) + @while [ `$(OCI) ce node-pool list --compartment-id $(CMPID) \ + --lifecycle-state ACTIVE --cluster-id $(CLSID) \ + --query data[0].id --raw-output |wc -l ` -eq 0 ] ; do sleep 5 ; done + @sleep 10 + $(eval PLLID :=$(shell $(OCI) ce node-pool list --compartment-id $(CMPID) \ + --lifecycle-state ACTIVE --cluster-id $(CLSID) --query data[0].id --raw-output)) + @echo " - POOL CREATED" + +config: + @$(OCI) ce cluster create-kubeconfig --cluster-id \ + `$(OCI) ce cluster list \ + --compartment-id $(CMPID) --name $(CLUNM) --lifecycle-state ACTIVE \ + --query data[0].id --raw-output` \ + --file $(HOME)/.kube/config --region \ + `$(OCI) ce cluster list \ + --compartment-id $(CMPID) --name $(CLUNM) --lifecycle-state ACTIVE \ + --query data[0].id --raw-output|$(CUT) -f4 -d. ` \ + --token-version 2.0.0 --kube-endpoint PUBLIC_ENDPOINT + @echo " - KUBECTL PUBLIC ENDPOINT CONFIGURED" + + +desccluster: + @$(eval TMPSP := $(shell date "+%y/%m/%d:%H:%M" )) + $(KUBECTL) get nodes -o wide + $(KUBECTL) get storageclass + +checkvol: + $(OCI) bv volume list \ + --compartment-id $(CMPID) \ + --lifecycle-state AVAILABLE \ + --query 'data[?"freeform-tags".stackgres == '\''OKE'\''].id' +``` + + diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_create.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_create.yaml new file mode 100644 index 00000000..5e020de6 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_create.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" + ordsImagePullPolicy: "Always" + dbTnsurl : "...Container tns alias....." 
+ replicas: 1 + sysAdminPwd: + secret: + secretName: "[...]" + key: "[...]" + ordsPwd: + secret: + secretName: "[...]" + key: "[...]" + cdbAdminUser: + secret: + secretName: "[...]" + key: "[...]" + cdbAdminPwd: + secret: + secretName: "[...]" + key: "[...]" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + cdbTlsKey: + secret: + secretName: "[...]" + key: "[...]" + cdbTlsCrt: + secret: + secretName: "[...]" + key: "[...]" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + + diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_secret.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_secret.yaml new file mode 100644 index 00000000..567b90a4 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_secret.yaml @@ -0,0 +1,17 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: cdb1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + ords_pwd: ".....base64 encoded password...." + sysadmin_pwd: ".....base64 encoded password...." + cdbadmin_user: ".....base64 encoded password...." + cdbadmin_pwd: ".....base64 encoded password...." + webserver_user: ".....base64 encoded password...." + webserver_pwd: ".....base64 encoded password...." diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_clone.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_clone.yaml new file mode 100644 index 00000000..964d1e5e --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_clone.yaml @@ -0,0 +1,60 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdb2_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + action: "Clone" diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_close.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_close.yaml new file mode 100644 index 00000000..06d92469 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_close.yaml @@ -0,0 +1,48 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + 
secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_create.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_create.yaml new file mode 100644 index 00000000..2744223e --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_create.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + fileNameConversions: "NONE" + tdeImport: false + totalSize: "1G" + tempSize: "100M" + action: "Create" + assertivePdbDeletion: true + diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_delete.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_delete.yaml new file mode 100644 index 00000000..523ac1cb --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_delete.yaml @@ -0,0 +1,39 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + 
cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + + diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_open.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_open.yaml new file mode 100644 index 00000000..866db3e4 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_open.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "[...]" + key: "[...]" + adminPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_plug.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_plug.yaml new file mode 100644 index 00000000..e6605276 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_plug.yaml @@ -0,0 +1,55 @@ +# +# Copyright (c) 
2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + action: "Plug" + assertivePdbDeletion: true + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_secret.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_secret.yaml new file mode 100644 index 00000000..60d95d76 --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_secret.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: pdb1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + sysadmin_user: ".....base64 encoded password...." + sysadmin_pwd: ".....base64 encoded password...." + webserver_user: ".....base64 encoded password...." + webserver_pwd: ".....base64 encoded password...." 
+ diff --git a/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_unplug.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_unplug.yaml new file mode 100644 index 00000000..4e404efe --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_unplug.yaml @@ -0,0 +1,49 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/ords-based/usecase/README.md b/docs/multitenant/ords-based/usecase/README.md new file mode 100644 index 00000000..b6f5e590 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/README.md @@ -0,0 +1,112 @@ + + + +# Use case directory + +The use case directory contains the yaml files to test the multitenant controller functionalities: create ords pod and pdb operation *create / open / close / unplug / plug / delete / clone /map / parameter session* +In this exampl the cdb and pdbs resources are depolyed in different namespaces + +## Makefile helper + +Customizing yaml files (tns alias / credential / namespaces name etc...) 
is a long procedure prone to human error. A simple [makefile](../usecase/makefile) is available to quickly and safely configure yaml files with your system environment information. Just edit the [parameter file](../usecase/parameters.txt) before proceeding. +
```text +[👉 CHECK PARAMETERS..................] +TNSALIAS...............:(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELA.... +ORDPWD.................:[Password for ORDS_PUBLIC_USER ] +SYSPWD.................:[SYS password] +WBUSER.................:[username for https authentication] +WBPASS.................:[password for https authentication] +PDBUSR.................:[pdb admin user] +PDBPWD.................:[pdb admin password] +CDBUSR.................:[cdb admin user e.g. C##DBAPI_CDB_ADMIN] +CDBPWD.................:[cdb admin password] +PDBNAMESPACE...........:[namespace for pdb] +CDBNAMESPACE...........:[namespace for cdb] +COMPANY................:oracle +APIVERSION.............:v4 ---> do not edit +``` + +⚠ **WARNING: The makefile is intended to speed up the usecase directory configuration only; it is not supported, and the editing and configuration of yaml files for production systems is left up to the end user** + +### Prerequisites + +- Make sure that **kubectl** is properly configured. +- Make sure that all requirements listed in the [operator installation page](../../../../docs/installation/OPERATOR_INSTALLATION_README.md) are implemented. (role binding, webcert, etc.) +- Make sure that the administrative user on the container database is configured as documented. + +### Commands + +Review your configuration by running ```make check```; if all the parameters are correct, then you can proceed with yaml file and certificate generation. + +By executing the command ```make operator``` you will have in your directory an operator yaml file with the WATCH LIST required to operate with multiple namespaces.
+Note that the yaml file is not applyed; you need to manually execute ```kubectl apply -f oracle-database-operator.yaml```. + +```bash +make operator +``` +You can generate all the other yaml files for pdb life cycle management using ```make genyaml``` + +```bash +make genyaml +``` + +list of generated yaml files + +```text +-rw-r--r-- 1 mmalvezz g900 137142 Nov 13 09:35 oracle-database-operator.yaml +-rw-r--r-- 1 mmalvezz g900 321 Nov 13 10:27 create_cdb_secrets.yaml +-rw-r--r-- 1 mmalvezz g900 234 Nov 13 10:27 create_pdb_secrets.yaml +-rw-r--r-- 1 mmalvezz g900 381 Nov 13 10:27 pdbnamespace_binding.yaml +-rw-r--r-- 1 mmalvezz g900 381 Nov 13 10:27 cdbnamespace_binding.yaml +-rw-r--r-- 1 mmalvezz g900 1267 Nov 13 10:27 create_ords_pod.yaml +-rw-r--r-- 1 mmalvezz g900 935 Nov 13 10:27 create_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 935 Nov 13 10:27 create_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 open_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 open_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 845 Nov 13 10:27 open_pdb3_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 close_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 close_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 846 Nov 13 10:27 close_pdb3_resource.yaml +-rw-r--r-- 1 mmalvezz g900 927 Nov 13 10:27 clone_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 928 Nov 13 10:27 clone_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 802 Nov 13 10:27 delete_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 802 Nov 13 10:27 delete_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 824 Nov 13 10:27 unplug_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 992 Nov 13 10:27 plug_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 887 Nov 13 10:27 map_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 887 Nov 13 10:27 map_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 890 Nov 13 10:27 map_pdb3_resource.yaml +``` + +The command ```make secretes ``` will configure 
database secrets credential and certificates secretes + +```bash +make secrets +``` + + + +The makefile includes other different targets that can be used to test the various pdb operations available. E.g. + +```makefile +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(PDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb4 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get pdb pdb3 -n $(PDBNAMESPACE) +``` +The target ```run03.2``` clones pdb2 into pdb4 and wait for ```$TEST_EXEC_TIMEOUT``` for the operation to complete. + +### Output executions:. + +```make secrets``` + +![image](../images/makesecrets_1_1.png) + + + +```make runall``` executes different pdb operations including the cdb controller creation + +![image](../images/makerunall.png) \ No newline at end of file diff --git a/docs/multitenant/ords-based/usecase/cdbnamespace_binding.yaml b/docs/multitenant/ords-based/usecase/cdbnamespace_binding.yaml new file mode 100644 index 00000000..5fd355f4 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/cdbnamespace_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: cdbnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/ords-based/usecase/clone_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/clone_pdb1_resource.yaml new file mode 100644 index 00000000..5723f7c6 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/clone_pdb1_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + 
cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/clone_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/clone_pdb2_resource.yaml new file mode 100644 index 00000000..2b9fc70a --- /dev/null +++ b/docs/multitenant/ords-based/usecase/clone_pdb2_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb4 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git 
a/docs/multitenant/ords-based/usecase/close_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/close_pdb1_resource.yaml new file mode 100644 index 00000000..ae837ce0 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/close_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/close_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/close_pdb2_resource.yaml new file mode 100644 index 00000000..1b5d1324 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/close_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + 
webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/close_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase/close_pdb3_resource.yaml new file mode 100644 index 00000000..f4a32938 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/close_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/create_ords_pod.yaml b/docs/multitenant/ords-based/usecase/create_ords_pod.yaml new file mode 100644 index 00000000..ad196c9d --- /dev/null +++ b/docs/multitenant/ords-based/usecase/create_ords_pod.yaml @@ -0,0 +1,48 @@ +apiVersion: database.oracle.com/v4 +kind: CDB +metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + ordsImage: _your_container_registry/ords-dboper:latest + ordsImagePullPolicy: "Always" + dbTnsurl : "T H I S I S J U S T A N E X A M P L E
(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + replicas: 1 + deletePdbCascade: true + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/create_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/create_pdb1_resource.yaml new file mode 100644 index 00000000..84e910e0 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/create_pdb1_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + 
secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/create_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/create_pdb2_resource.yaml new file mode 100644 index 00000000..0a71c7c3 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/create_pdb2_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/delete_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/delete_pdb1_resource.yaml new file mode 100644 index 00000000..3aba580c --- /dev/null +++ b/docs/multitenant/ords-based/usecase/delete_pdb1_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + 
adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/delete_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/delete_pdb2_resource.yaml new file mode 100644 index 00000000..59b50a64 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/delete_pdb2_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/makefile b/docs/multitenant/ords-based/usecase/makefile new file mode 100644 index 00000000..dc881598 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/makefile @@ -0,0 +1,915 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# | | | | ___| |_ __ ___ _ __ +# | |_| |/ _ \ | '_ \ / _ \ '__| +# | _ | __/ | |_) | __/ | +# |_| |_|\___|_| .__/ \___|_| +# |_| +# +# WARNING: Using this makefile helps you to customize yaml +# files. Edit parameters.txt with your enviroment +# informartion and execute the following steps +# +# 1) make operator +# it configures the operator yaml files with the +# watch namelist required by the multitenant controllers +# +# 2) make genyaml +# It automatically creates all the yaml files based on the +# information available in the parameters file +# +# 3) make secrets +# It configure the required secrets necessary to operate +# with pdbs multitenant controllers +# +# 4) make runall01 +# Start a series of operation create open close delete and so on +# +# LIST OF GENERAED YAML FILE +# +# ----------------------------- ---------------------------------- +# oracle-database-operator.yaml : oracle database operator +# cdbnamespace_binding.yaml : role binding for cdbnamespace +# pdbnamespace_binding.yaml : role binding for pdbnamespace +# create_ords_pod.yaml : create rest server pod +# create_pdb1_resource.yaml : create first pluggable database +# create_pdb2_resource.yaml : create second pluggable database +# open_pdb1_resource.yaml : open first pluggable database +# open_pdb2_resource.yaml : open second pluggable database +# close_pdb1_resource.yaml : close first pluggable database +# close_pdb2_resource.yaml : close second pluggable database +# clone_pdb_resource.yaml : clone thrid pluggable database +# clone_pdb2_resource.yaml : clone 4th pluggable database +# delete_pdb1_resource.yaml : delete first pluggable database +# delete_pdb2_resource.yaml : delete sencond pluggable database +# delete_pdb3_resource.yaml : delete thrid pluggable database +# unplug_pdb1_resource.yaml : unplug first pluggable 
database +# plug_pdb1_resource.yaml : plug first pluggable database +# map_pdb1_resource.yaml : map the first pluggable database +# config_map.yam : pdb parameters array +# +DATE := `date "+%y%m%d%H%M%S"` +###################### +# PARAMETER SECTIONS # +###################### + +export PARAMETERS=parameters.txt +export TNSALIAS=$(shell cat $(PARAMETERS) |grep -v ^\#|grep TNSALIAS|cut -d : -f 2) +export ORDPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep ORDPWD|cut -d : -f 2) +export SYSPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep SYSPWD|cut -d : -f 2) +export WBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBUSER|cut -d : -f 2) +export WBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBPASS|cut -d : -f 2) +export PDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBUSR|cut -d : -f 2) +export PDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBPWD|cut -d : -f 2) +export CDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBUSR|cut -d : -f 2) +export CDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBPWD|cut -d : -f 2) +export PDBNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBNAMESPACE|cut -d : -f 2) +export CDBNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBNAMESPACE|cut -d : -f 2) +export ORDSIMG=$(shell cat $(PARAMETERS)|grep -v ^\#|grep ORDSIMG|cut -d : -f 2,3) +export COMPANY=$(shell cat $(PARAMETERS)|grep -v ^\#|grep COMPANY|cut -d : -f 2) +export APIVERSION=$(shell cat $(PARAMETERS)|grep -v ^\#|grep APIVERSION|cut -d : -f 2) +export OPRNAMESPACE=oracle-database-operator-system +export ORACLE_OPERATOR_YAML=../../../../oracle-database-operator.yaml +export TEST_EXEC_TIMEOUT=3m +export IMAGE=oracle/ords-dboper:latest +export ORDSIMGDIR=../../../../ords + +REST_SERVER=ords +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +PRVKEY=ca.key +PUBKEY=public.pem +COMPANY=oracle +RUNTIME=/usr/bin/podman + +################# +### FILE LIST ### +################# + +export ORDS_POD=create_ords_pod.yaml + +export CDB_SECRETS=create_cdb_secrets.yaml +export 
PDB_SECRETS=create_pdb_secrets.yaml + +export PDBCRE1=create_pdb1_resource.yaml +export PDBCRE2=create_pdb2_resource.yaml + +export PDBCLOSE1=close_pdb1_resource.yaml +export PDBCLOSE2=close_pdb2_resource.yaml +export PDBCLOSE3=close_pdb3_resource.yaml + +export PDBOPEN1=open_pdb1_resource.yaml +export PDBOPEN2=open_pdb2_resource.yaml +export PDBOPEN3=open_pdb3_resource.yaml + +export PDBCLONE1=clone_pdb1_resource.yaml +export PDBCLONE2=clone_pdb2_resource.yaml + +export PDBDELETE1=delete_pdb1_resource.yaml +export PDBDELETE2=delete_pdb2_resource.yaml +export PDBDELETE3=delete_pdb3_resource.yaml + +export PDBUNPLUG1=unplug_pdb1_resource.yaml +export PDBPLUG1=plug_pdb1_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + + +##BINARIES +export KUBECTL=/usr/bin/kubectl +OPENSSL=/usr/bin/openssl +ECHO=/usr/bin/echo +RM=/usr/bin/rm +CP=/usr/bin/cp +TAR=/usr/bin/tar +MKDIR=/usr/bin/mkdir +SED=/usr/bin/sed + +define msg +@printf "\033[31;7m%s\033[0m\r" "......................................]" +@printf "\033[31;7m[\xF0\x9F\x91\x89 %s\033[0m\n" $(1) +endef + +check: + $(call msg,"CHECK PARAMETERS") + @printf "TNSALIAS...............:%.60s....\n" $(TNSALIAS) + @printf "ORDPWD.................:%s\n" $(ORDPWD) + @printf "SYSPWD.................:%s\n" $(SYSPWD) + @printf "WBUSER.................:%s\n" $(WBUSER) + @printf "WBPASS.................:%s\n" $(WBPASS) + @printf "PDBUSR.................:%s\n" $(PDBUSR) + @printf "PDBPWD.................:%s\n" $(PDBPWD) + @printf "CDBUSR.................:%s\n" $(CDBUSR) + @printf "CDBPWD.................:%s\n" $(CDBPWD) + @printf "PDBNAMESPACE...........:%s\n" $(PDBNAMESPACE) + @printf "CDBNAMESPACE...........:%s\n" $(CDBNAMESPACE) + @printf "COMPANY................:%s\n" $(COMPANY) + @printf "APIVERSION.............:%s\n" $(APIVERSION) + 
+ +tlscrt: + $(call msg,"TLS GENERATION") + #$(OPENSSL) genrsa -out $(PRVKEY) 2048 + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > $(PRVKEY) + $(OPENSSL) req -new -x509 -days 365 -key $(PRVKEY) \ + -subj "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=$(COMPANY) Root CA" -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj \ + "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=cdb-dev-$(REST_SERVER).$(CDBNAMESPACE)" -out server.csr + $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER).$(CDBNAMESPACE)" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey $(PRVKEY) -CAcreateserial -out $(SCRT) + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + +tlssec: + $(call msg,"GENERATE TLS SECRET") + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(CDBNAMESPACE) + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(PDBNAMESPACE) + + +delsec: + $(call msg,"CLEAN OLD SECRETS") + $(eval SECRETSP:=$(shell kubectl get secrets -n $(PDBNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + $(eval SECRETSL:=$(shell kubectl get secrets -n $(CDBNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + @[ "${SECRETSP}" ] && ( \ + printf "Deleteing secrets in namespace -n $(PDBNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSP) -n $(PDBNAMESPACE))\ + || ( echo "No screts in namespace $(PDBNAMESPACE)") + @[ "${SECRETSL}" ] && ( \ + printf "Deleteing secrets in namespace -n $(CDBNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSL) -n $(CDBNAMESPACE))\ + || ( echo "No screts in namespace $(PDBNAMESPACE)") + + +###### ENCRYPTED SECRETS ###### +export PRVKEY=ca.key +export PUBKEY=public.pem +WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt 
+CDBUSRFILE=cdbusr.txt +CDBPWDFILE=cdbpwd.txt +SYSPWDFILE=syspwd.txt +ORDPWDFILE=ordpwd.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + + + +secrets: delsec tlscrt tlssec + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey="$(PRVKEY)" -n $(PDBNAMESPACE) + @$(ECHO) $(WBUSER) > $(WBUSERFILE) + @$(ECHO) $(WBPASS) > $(WBPASSFILE) + @$(ECHO) $(CDBPWD) > $(CDBPWDFILE) + @$(ECHO) $(CDBUSR) > $(CDBUSRFILE) + @$(ECHO) $(SYSPWD) > $(SYSPWDFILE) + @$(ECHO) $(ORDPWD) > $(ORDPWDFILE) + @$(ECHO) $(PDBUSR) > $(PDBUSRFILE) + @$(ECHO) $(PDBPWD) > $(PDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBUSERFILE) |base64 > e_$(WBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBPASSFILE) |base64 > e_$(WBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBPWDFILE) |base64 > e_$(CDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBUSRFILE) |base64 > e_$(CDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE) |base64 > e_$(SYSPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(ORDPWDFILE) |base64 > e_$(ORDPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBUSRFILE) |base64 > e_$(PDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBPWDFILE) |base64 > e_$(PDBPWDFILE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic cdbpwd 
--from-file=e_$(CDBPWDFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic cdbusr --from-file=e_$(CDBUSRFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic syspwd --from-file=e_$(SYSPWDFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic ordpwd --from-file=e_$(ORDPWDFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic pdbusr --from-file=e_$(PDBUSRFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic pdbpwd --from-file=e_$(PDBPWDFILE) -n $(PDBNAMESPACE) + $(RM) $(WBUSERFILE) $(WBPASSFILE) $(CDBPWDFILE) $(CDBUSRFILE) $(SYSPWDFILE) $(ORDPWDFILE) $(PDBUSRFILE) $(PDBPWDFILE) + $(RM) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(CDBPWDFILE) e_$(CDBUSRFILE) e_$(SYSPWDFILE) e_$(ORDPWDFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + + +### YAML FILE SECTION ### +operator: + $(CP) ${ORACLE_OPERATOR_YAML} . + ${CP} `basename ${ORACLE_OPERATOR_YAML}` `basename ${ORACLE_OPERATOR_YAML}`.ORG + $(SED) -i 's/value: ""/value: $(OPRNAMESPACE),$(PDBNAMESPACE),$(CDBNAMESPACE)/g' `basename ${ORACLE_OPERATOR_YAML}` + + +define _script00 +cat < authsection01.yaml + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + +cat< authsection02.yaml + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + 
secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + + +cat < ${PDBNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: ${PDBNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +cat < ${CDBNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: ${CDBNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +endef +export script00 = $(value _script00) +secyaml: + @ eval "$$script00" + +#echo ords pod creation +define _script01 +cat < ${ORDS_POD} +apiVersion: database.oracle.com/${APIVERSION} +kind: CDB +metadata: + name: cdb-dev + namespace: ${CDBNAMESPACE} +spec: + cdbName: "DB12" + ordsImage: ${ORDSIMG} + ordsImagePullPolicy: "Always" + dbTnsurl : ${TNSALIAS} + replicas: 1 + deletePdbCascade: true +EOF + +cat authsection01.yaml >> ${ORDS_POD} + +endef +export script01 = $(value _script01) + + +define _script02 + +cat <${PDBCRE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + 
tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat < ${PDBCRE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat <${PDBOPEN1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBCLOSE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${PDBCLOSE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + 
cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${PDBCLOSE3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat < ${PDBCLONE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" +EOF + +cat < ${PDBCLONE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb4 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" +EOF + + +cat < ${PDBDELETE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${PDBDELETE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${PDBUNPLUG1} +apiVersion: database.oracle.com/${APIVERSION} 
+kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" +EOF + +cat <${PDBPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertivePdbDeletion: true + action: "Plug" +EOF + +cat <${PDBMAP1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + +cat <${PDBMAP2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +cat <${PDBMAP3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +## Auth information +for _file in ${PDBCRE1} ${PDBCRE2} ${PDBOPEN1} ${PDBOPEN2} ${PDBOPEN3} ${PDBCLOSE1} ${PDBCLOSE2} ${PDBCLOSE3} 
${PDBCLONE1} ${PDBCLONE2} ${PDBDELETE1} ${PDBDELETE2} ${PDBUNPLUG1} ${PDBPLUG1} ${PDBMAP1} ${PDBMAP2} ${PDBMAP3} +do +ls -ltr ${_file} + cat authsection02.yaml >> ${_file} +done +rm authsection02.yaml +rm authsection01.yaml +endef + +export script02 = $(value _script02) + +genyaml: secyaml + @ eval "$$script01" + @ eval "$$script02" + +cleanyaml: + - $(RM) $(PDBMAP3) $(PDBMAP2) $(PDBMAP1) $(PDBPLUG1) $(PDBUNPLUG1) $(PDBDELETE2) $(PDBDELETE1) $(PDBCLONE2) $(PDBCLONE1) $(PDBCLOSE3) $(PDBCLOSE2) $(PDBCLOSE1) $(PDBOPEN3) $(PDBOPEN2) $(PDBOPEN1) $(PDBCRE2) $(PDBCRE1) $(ORDS_POD) $(CDB_SECRETS) $(PDB_SECRETS) + - $(RM) ${PDBNAMESPACE}_binding.yaml ${CDBNAMESPACE}_binding.yaml + + +cleancrt: + - $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl + + +################# +### PACKAGING ### +################# + +pkg: + - $(RM) -rf /tmp/pkgtestplan + $(MKDIR) /tmp/pkgtestplan + $(CP) -R * /tmp/pkgtestplan + $(CP) ../../../../oracle-database-operator.yaml /tmp/pkgtestplan/ + $(TAR) -C /tmp -cvf ~/pkgtestplan_$(DATE).tar pkgtestplan + +################ +### diag ### +################ + +login: + $(KUBECTL) exec `$(KUBECTL) get pods -n $(CDBNAMESPACE)|grep ords|cut -d ' ' -f 1` -n $(CDBNAMESPACE) -it -- /bin/bash + + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + + +dump: + @$(eval TMPSP := $(shell date 
"+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + +####################################################### +#### TEST SECTION #### +####################################################### + +run00: + @$(call msg,"cdb pod creation") + - $(KUBECTL) delete cdb cdb-dev -n $(CDBNAMESPACE) + $(KUBECTL) apply -f $(ORDS_POD) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" cdb cdb-dev -n $(CDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"cdb pod completed") + $(KUBECTL) get cdb -n $(CDBNAMESPACE) + $(KUBECTL) get pod -n $(CDBNAMESPACE) + +run01.1: + @$(call msg,"pdb pdb1 creation") + $(KUBECTL) apply -f $(PDBCRE1) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 creation completed") + $(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + +run01.2: + @$(call msg, "pdb pdb2 creation") + $(KUBECTL) apply -f $(PDBCRE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb2 creation completed") + $(KUBECTL) get pdb pdb2 -n $(PDBNAMESPACE) + +run02.1: + @$(call msg, "pdb pdb1 open") + $(KUBECTL) apply -f $(PDBOPEN1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 open completed") + 
$(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + +run02.2: + @$(call msg,"pdb pdb2 open") + $(KUBECTL) apply -f $(PDBOPEN2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 open completed") + $(KUBECTL) get pdb pdb2 -n $(PDBNAMESPACE) + + +run03.1: + @$(call msg,"clone pdb1-->pdb3") + $(KUBECTL) apply -f $(PDBCLONE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb3 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb1-->pdb3 completed") + $(KUBECTL) get pdb pdb3 -n $(PDBNAMESPACE) + + +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(PDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb4 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get pdb pdb3 -n $(PDBNAMESPACE) + + +run04.1: + @$(call msg,"pdb pdb1 close") + $(KUBECTL) apply -f $(PDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 close completed") + $(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + +run04.2: + @$(call msg,"pdb pdb2 close") + $(KUBECTL) apply -f $(PDBCLOSE2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 close completed") + $(KUBECTL) get pdb pdb2 -n $(PDBNAMESPACE) + +run05.1: + @$(call msg,"pdb pdb1 unplug") + $(KUBECTL) apply -f $(PDBUNPLUG1) + $(KUBECTL) wait --for=delete pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb1 unplug completed") + +run06.1: + @$(call msg, "pdb pdb1 plug") + $(KUBECTL) apply -f $(PDBPLUG1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 plug completed") + $(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + 
+run07.1: + @$(call msg,"pdb pdb1 delete ") + - $(KUBECTL) apply -f $(PDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) apply -f $(PDBDELETE1) + $(KUBECTL) wait --for=delete pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb1 delete") + $(KUBECTL) get pdb -n $(PDBNAMESPACE) + +run99.1: + $(KUBECTL) delete cdb cdb-dev -n $(CDBNAMESPACE) + $(KUBECTL) wait --for=delete cdb cdb-dev -n $(CDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) get cdb -n $(CDBNAMESPACE) + $(KUBECTL) get pdb -n $(PDBNAMESPACE) + + +## SEQ | ACTION +## ----+---------------- +## 00 | create ords pod +## 01 | create pdb +## 02 | open pdb +## 03 | clone pdb +## 04 | close pdb +## 05 | unplug pdb +## 06 | plug pdb +## 07 | delete pdb (declarative) + + +runall01: run00 run01.1 run01.2 run03.1 run03.2 run04.1 run05.1 run06.1 run02.1 run07.1 + + +###### BUILD ORDS IMAGE ###### + +createimage: + $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR) + +createimageproxy: + $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR) --build-arg https_proxy=$(HTTPS_PROXY) --build-arg http_proxy=$(HTTP_PROXY) + +tagimage: + @echo "TAG IMAGE" + $(RUNTIME) tag $(IMAGE) $(ORDSIMG) + +push: + $(RUNTIME) push $(ORDSIMG) + + diff --git a/docs/multitenant/ords-based/usecase/map_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/map_pdb1_resource.yaml new file mode 100644 index 00000000..b71b59d5 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/map_pdb1_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + 
secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/map_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/map_pdb2_resource.yaml new file mode 100644 index 00000000..75d056d0 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/map_pdb2_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/map_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase/map_pdb3_resource.yaml new file mode 100644 index 00000000..3523aa68 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/map_pdb3_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + 
cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/open_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/open_pdb1_resource.yaml new file mode 100644 index 00000000..93a1d43a --- /dev/null +++ b/docs/multitenant/ords-based/usecase/open_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/open_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/open_pdb2_resource.yaml new file mode 100644 
index 00000000..deb27f9a --- /dev/null +++ b/docs/multitenant/ords-based/usecase/open_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/open_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase/open_pdb3_resource.yaml new file mode 100644 index 00000000..586f2f57 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/open_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + 
secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/parameters.txt b/docs/multitenant/ords-based/usecase/parameters.txt new file mode 100644 index 00000000..64dc3759 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/parameters.txt @@ -0,0 +1,61 @@ + +######################## +## REST SERVER IMAGE ### +######################## + +ORDSIMG:_your_container_registry/ords-dboper:latest + +############################## +## TNS URL FOR CDB CREATION ## +############################## +TNSALIAS:"T H I S I S J U S T A N E X A M P L E (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + +########################################### +## ORDS PUBLIC USER ## +########################################### +ORDPWD:change_me_please + +########################################### +## SYSPASSWORD ## +########################################### +SYSPWD:change_me_please + +####################### +## HTTPS CREDENTIAL ### +####################### + +WBUSER:change_me_please +WBPASS:change_me_please + +##################### +## PDB ADMIN USER ### +##################### + +PDBUSR:change_me_please +PDBPWD:change_me_please + +##################### +## CDB ADMIN USER ### +##################### + +CDBUSR:C##DBAPI_CDB_ADMIN +CDBPWD:change_me_please + +################### +### NAMESPACES #### +################### + +PDBNAMESPACE:pdbnamespace +CDBNAMESPACE:cdbnamespace + +#################### +### COMPANY NAME ### +#################### + +COMPANY:oracle + +#################### +### APIVERSION ### +#################### + +APIVERSION:v4 diff --git a/docs/multitenant/ords-based/usecase/pdbnamespace_binding.yaml 
b/docs/multitenant/ords-based/usecase/pdbnamespace_binding.yaml new file mode 100644 index 00000000..5af79ed6 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/pdbnamespace_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: pdbnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/ords-based/usecase/plug_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/plug_pdb1_resource.yaml new file mode 100644 index 00000000..9eb5ed77 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/plug_pdb1_resource.yaml @@ -0,0 +1,52 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertivePdbDeletion: true + action: "Plug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/unplug_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/unplug_pdb1_resource.yaml new file 
mode 100644 index 00000000..0036d5f7 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/unplug_pdb1_resource.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/README.md b/docs/multitenant/ords-based/usecase01/README.md new file mode 100644 index 00000000..0020541c --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/README.md @@ -0,0 +1,516 @@ + + + +# STEP BY STEP USE CASE + +- [STEP BY STEP USE CASE](#step-by-step-use-case) + - [INTRODUCTION](#introduction) + - [OPERATIONAL STEPS](#operational-steps) + - [Download latest version from github ](#download-latest-version-from-github-) + - [Upload webhook certificates ](#upload-webhook-certificates-) + - [Create the dboperator ](#create-the-dboperator-) + - [Create secret for container registry](#create-secret-for-container-registry) + - [Build ords immage ](#build-ords-immage-) + - [Database Configuration](#database-configuration) + - [Create CDB secret](#create-cdb-secret) + - [Create Certificates](#create-certificates) + - [Apply cdb.yaml](#apply-cdbyaml) + - [CDB - Logs and throuble shutting](#cdb---logs-and-throuble-shutting) + - [Create PDB secret](#create-pdb-secret) + - [Apply pdb yaml 
file to create pdb](#apply-pdb-yaml-file-to-create-pdb) + - [Other actions](#other-actions) + - [Imperative approach on pdb deletion - will be avilable in 1.2.0 ](#imperative-approach-on-pdb-deletion) + + + +##### INTRODUCTION + +This readme is a step by step guide used to implement database multi tenant operator. It assumes that a kubernets cluster and a database server are already available (no matter if single instance or RAC). kubectl must be configured in order to reach k8s cluster. + +The following table reports the parameters required to configure and use oracle multi tenant controller for pluggable database lifecycle management. + +| yaml file parameters | value | description /ords parameter | +|-------------- |--------------------------- |-------------------------------------------------| +| dbserver | or | [--db-hostname][1] | +| dbTnsurl | | [--db-custom-url/db.customURL][dbtnsurl] | +| port | | [--db-port][2] | +| cdbName | | Container Name | +| name | | Ords podname prefix in cdb.yaml | +| name | | pdb resource in pdb.yaml | +| ordsImage | /ords-dboper:latest|My public container registry | +| pdbName | | Pluggable database name | +| servicename | | [--db-servicename][3] | +| sysadmin_user | | [--admin-user][adminuser] | +| sysadmin_pwd | | [--password-stdin][pwdstdin] | +| cdbadmin_user | | [db.cdb.adminUser][1] | +| cdbadmin_pwd | | [db.cdb.adminUser.password][cdbadminpwd] | +| webserver_user| | [https user][http] NOT A DB USER | +| webserver_pwd | | [http user password][http] | +| ords_pwd | | [ORDS_PUBLIC_USER password][public_user] | +| pdbTlsKey | | [standalone.https.cert.key][key] | +| pdbTlsCrt | | [standalone.https.cert][cr] | +| pdbTlsCat | | certificate authority | +| cdbOrdsPrvKey | | private key (cdb crd) | +| pdbOrdsPrvKey | | private key (pdb crd) | +| assertivePdbDeletion | boolean | [turn on imperative approach on crd deleteion][imperative] | + +> A [makfile](./makefile) is available to sped up the command execution for the multitenant 
setup and test. See the comments in the header of file + +### OPERATIONAL STEPS +---- + + +#### Download latest version from github + + +```bash +git clone https://github.com/oracle/oracle-database-operator.git +``` + +If golang compiler is installed on your environment and you've got a public container registry then you can compile the operator, upload to the registry and use it + +```bash + +cd oracle-database-operator +make generate +make manifests +make install +make docker-build IMG=/operator:latest + +make operator-yaml IMG=operator:latest +``` + +> **NOTE:** The last make executions recreates the **oracle-database-operator.yaml** with the **image:** parameter pointing to your public container registry. If you don't have a golang compilation environment you can use the **oracle-database-operator.yaml** provided in the github distribution. Check [operator installation documentation](../installation/OPERATOR_INSTALLATION_README.md ) for more details. + +> **NOTE:** If you are using oracle-container-registry make sure to accept the license agreement otherwise the operator image pull fails. +---- + +#### Upload webhook certificates + +```bash +kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml +``` + +#### Create the dboperator + +```bash +cd oracle-database-operator +/usr/bin/kubectl apply -f oracle-database-operator.yaml +``` ++ Check the status of the operator + +```bash +/usr/bin/kubectl get pods -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +oracle-database-operator-controller-manager-557ff6c659-g7t66 1/1 Running 0 10s +oracle-database-operator-controller-manager-557ff6c659-rssmj 1/1 Running 0 10s +oracle-database-operator-controller-manager-557ff6c659-xpswv 1/1 Running 0 10s + +``` +---- + +#### Create secret for container registry + ++ Make sure to login to your container registry and then create the secret for you container registry. 
+ +```bash +docker login **** +/usr/bin/kubectl create secret generic container-registry-secret --from-file=.dockerconfigjson=/home/oracle/.docker/config.json --type=kubernetes.io/dockerconfigjson -n oracle-database-operator-system +``` + ++ Check secret + +```bash +kubectl get secret -n oracle-database-operator-system +NAME TYPE DATA AGE +container-registry-secret kubernetes.io/dockerconfigjson 1 19s +webhook-server-cert kubernetes.io/tls +``` +---- + +#### Build ords image + ++ Build the ords image, downloading ords software is no longer needed; just build the image and push it to your repository + +```bash +cd oracle-database-operator/ords +docker build -t oracle/ords-dboper:latest . +``` + +[Example of execution](./logfiles/BuildImage.log) ++ Login to your container registry and push the ords image. + +```bash +docker tag /ords-dboper:latest +docker push /ords-dboper:latest +``` +[Example of execution](./logfiles/tagandpush.log) + +---- + +#### Database Configuration + ++ Configure Database + +Connect as sysdba and execute the following script in order to create the required ords accounts. + +```sql +ALTER SESSION SET "_oracle_script"=true; +DROP USER cascade; +CREATE USER IDENTIFIED BY CONTAINER=ALL ACCOUNT UNLOCK; +GRANT SYSOPER TO CONTAINER = ALL; +GRANT SYSDBA TO CONTAINER = ALL; +GRANT CREATE SESSION TO CONTAINER = ALL; +``` +---- +#### Create Certificates + ++ Create certificates: At this stage we need to create certificates on our local machine and upload into kubernetes cluster by creating new secrets. 
+ + + +```text + + +-----------+ + | openssl | + +-----------+ + | + | + +-----------+ + | tls.key | + | tls.crt +------------+ + | ca.crt | | + +-----------+ | + | + +------------------------|---------------------------+ + |KUBERNETES +------+--------+ | + |CLUSTER +---|kubernet secret|---+ | + | | +---------------+ | | + | | | | + | +----------+---+ https +--+----------+ | + | |ORDS CONTAINER|<-------------->| PDB/POD | | + | +----------+---+ +-------------+ | + | cdb.yaml | pdb.yaml | + +-------------|--------------------------------------+ + | + | + +-----------+ + | DB SERVER | + +-----------+ + +``` + +```bash + +openssl genrsa -out 2048 +openssl req -new -x509 -days 365 -key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=oracle Root CA" -out +openssl req -newkey rsa:2048 -nodes -keyout -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=-ords" -out server.csr +/usr/bin/echo "subjectAltName=DNS:-ords,DNS:www.example.com" > extfile.txt +openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA -CAkey -CAcreateserial -out + +kubectl create secret tls db-tls --key="" --cert="" -n oracle-database-operator-system +kubectl create secret generic db-ca --from-file= -n oracle-database-operator-system + +``` + +[Example of execution:](./logfiles/openssl_execution.log) + +#### CDB and PDB credential + +Refer to the [landing page](../README.md) to implement openssl encrpted secrets. + +---- + +#### Apply cdb.yaml + + +**note:** + Before creating the CDB pod make sure that all the pluggable databases in the container DB are open. 
+ + ++ Create ords container + +```bash +/usr/bin/kubectl apply -f create_ords_pod.yaml -n oracle-database-operator-system +``` +Example: **create_ords_pod.yaml** + +```yaml +apiVersion: database.oracle.com/v1alpha1 +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" + ordsImagePullPolicy: "Always" + dbTnsurl : "...Container tns alias....." + replicas: 1 + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + + +``` +> **Note** if you are working in dataguard environment with multiple sites (AC/DR) specifying the host name (dbServer/dbPort/serviceName) may not be the suitable solution for this kind of configuration, use **dbTnsurl** instead. Specify the whole tns string which includes the hosts/scan list. 
+ +``` + +----------+ + ____| standbyB | + | | scanB | (DESCRIPTION= + +----------+ | +----------+ (CONNECT_TIMEOUT=90) + | primary |_______| (RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70) + | scanA | | +----------+ (TRANSPORT_CONNECT_TIMEOUT=10)(LOAD_BALLANCE=ON) + +----------+ |___| stanbyC | (ADDRESS=(PROTOCOL=TCP)(HOST=scanA.testrac.com)(PORT=1521)(IP=V4_ONLY)) + | scanC | (ADDRESS=(PROTOCOL=TCP)(HOST=scanB.testrac.com)(PORT=1521)(IP=V4_ONLY)) + +----------+ (ADDRESS=(PROTOCOL=TCP)(HOST=scanC.testrac.com)(PORT=1521)(IP=V4_ONLY)) + (CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) + + + dbtnsurl:((DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(TRANS...... +``` + +[create_ords_pod.yaml example](./create_ords_pod.yaml) + + +---- + +#### CDB - Logs and throuble shutting + ++ Check the status of ords container + +```bash +/usr/bin/kubectl get pods -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +cdb-dev-ords-rs-m9ggp 0/1 ContainerCreating 0 67s <----- +oracle-database-operator-controller-manager-557ff6c659-g7t66 1/1 Running 0 11m +oracle-database-operator-controller-manager-557ff6c659-rssmj 1/1 Running 0 11m +oracle-database-operator-controller-manager-557ff6c659-xpswv 1/1 Running 0 11m +``` ++ Make sure that the cdb container is running + +```bash +/usr/bin/kubectl get pods -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +cdb-dev-ords-rs-dnshz 1/1 Running 0 31s +oracle-database-operator-controller-manager-557ff6c659-9bjfl 1/1 Running 0 2m42s +oracle-database-operator-controller-manager-557ff6c659-cx8hd 1/1 Running 0 2m42s +oracle-database-operator-controller-manager-557ff6c659-rq9xs 1/1 Running 0 2m42s +``` ++ Check the status of the services + +```bash +kubectl get cdb -n oracle-database-operator-system +NAME CDB NAME DB SERVER DB PORT REPLICAS STATUS MESSAGE +[.....................................................] 
 Ready +``` ++ Use the log file for troubleshooting + +```bash +/usr/bin/kubectl logs `/usr/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system +``` +[Example of cdb creation log](./logfiles/cdb_creation.log) + ++ Test REST API from the pod. By querying the metadata catalog you can verify the status of https setting + +```bash + /usr/bin/kubectl exec -it `/usr/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system -i -t -- /usr/bin/curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ +``` +[Example of execution](./logfiles/testapi.log) + ++ Verify the pod environment variables + ```bash + kubectl set env pods --all --list -n oracle-database-operator-system + ``` + ++ Connect to cdb pod + +```bash + kubectl exec -it `kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system bash +``` ++ Dump ords server configuration + +```bash +/usr/bin/kubectl exec -it `/usr/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system -i -t -- /usr/local/bin/ords --config /etc/ords/config config list +``` +[Example of executions](./logfiles/ordsconfig.log) + +----- +#### Apply pdb yaml file to create pdb + +```bash +/usr/bin/kubectl apply -f create_pdb1_resource.yaml -n oracle-database-operator-system +``` + +Example: **create_pdb1_resource.yaml** + +```yaml +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" 
+ adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +``` + ++ Monitor the pdb creation status until message is success + +```bash +kubectl get pdbs --all-namespaces=true + + +-----------------------------------------+ +-----------------------------------------+ + | STATUS MESSAGE |______\ | STATUS MESSAGE | + | Creating Waiting for PDB to be created | / | Ready Success | + +-----------------------------------------+ +-----------------------------------------+ + +NAMESPACE NAME DBSERVER CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE +oracle-database-operator-system 1G Creating Waiting for PDB to be created + +[wait sometimes] + +kubectl get pdbs --all-namespaces=true +NAMESPACE NAME DBSERVER CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE +oracle-database-operator-system pdb1 READ WRITE 1G Ready Success +``` + +Connect to the hosts and verify the PDB creation. + +```text +[oracle@racnode1 ~]$ sqlplus '/as sysdba' +[...] 
+Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production +Version 19.15.0.0.0 + + +SQL> show pdbs + + CON_ID CON_NAME OPEN MODE RESTRICTED +---------- ------------------------------ ---------- ---------- + 2 PDB$SEED READ ONLY NO + 3 PDBDEV READ WRITE NO + +``` +Check the controller log to debug pluggable database life cycle actions in case of problems + +```bash +kubectl logs -f $(kubectl get pods -n oracle-database-operator-system|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1) -n oracle-database-operator-system +``` + +--- + +#### Other actions + +Configure and use other yaml files to perform pluggable database life cycle management actions **pdb_open.yaml** **pdb_close.yaml** + +> **Note** sql command *"alter pluggable database open instances=all;"* acts only on closed databases, so you don't get any oracle error in case of execution against a pluggable database that is already open + +#### Imperative approach on pdb deletion + +If **assertivePdbDeletion** is true then the command execution **kubectl delete pdbs crd_pdb_name** automatically deletes the pluggable database on the container database. By default this option is disabled. You can use this option during **create**,**map**,**plug** and **clone** operation. If the option is disabled then **kubectl delete** only deletes the crd but not the pluggable on the container db. Database deletion uses the option **including datafiles**. 
+If you drop the CRD without dropping the pluggable database and you need to recreate the CRD then you can use the [pdb_map.yaml](./pdb_map.yaml) + + +[1]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation + +[2]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation + +[3]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-DAA027FA-A4A6-43E1-B8DD-C92B330C2341:~:text=%2D%2Ddb%2Dservicename%20%3Cstring%3E + +[adminuser]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22:~:text=Table%202%2D6%20Command%20Options%20for%20Uninstall%20CLI + +[public_user]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/using-multitenant-architecture-oracle-rest-data-services.html#GUID-E64A141A-A71F-4979-8D33-C5F8496D3C19:~:text=Preinstallation%20Tasks%20for%20Oracle%20REST%20Data%20Services%20CDB%20Installation + +[key]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=standalone.https.cert.key + +[cr]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0 + 
+[cdbadminpwd]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=Table%20C%2D1%20Oracle%20REST%20Data%20Services%20Configuration%20Settings + +[pwdstdin]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-88479C84-CAC1-4133-A33E-7995A645EC05:~:text=default%20database%20pool.-,2.1.4.1%20Understanding%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation,-Table%202%2D2 + +[http]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-BEECC057-A8F5-4EAB-B88E-9828C2809CD8:~:text=Example%3A%20delete%20%5B%2D%2Dglobal%5D-,user%20add,-Add%20a%20user + +[dbtnsurl]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22 + +[imperative]:https://kubernetes.io/docs/concepts/overview/working-with-objects/object-management/ + + diff --git a/docs/multitenant/ords-based/usecase01/ca.crt b/docs/multitenant/ords-based/usecase01/ca.crt new file mode 100644 index 00000000..cc9aa8bb --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/ca.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEJTCCAw2gAwIBAgIUNXPtpnNEFBCMcnxRP5kJsBDpafcwDQYJKoZIhvcNAQEL +BQAwgaExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQH +DAxTYW5GcmFuY2lzY28xEDAOBgNVBAoMB29yYWNsZSAxNjA0BgNVBAMMLWNkYi1k +ZXYtb3Jkcy5vcmFjbGUtZGF0YWJhc2Utb3BlcmF0b3Itc3lzdGVtIDEcMBoGA1UE +AwwTbG9jYWxob3N0ICBSb290IENBIDAeFw0yNDA4MTIxNTMyMzVaFw0yNTA4MTIx +NTMyMzVaMIGhMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMG +A1UEBwwMU2FuRnJhbmNpc2NvMRAwDgYDVQQKDAdvcmFjbGUgMTYwNAYDVQQDDC1j +ZGItZGV2LW9yZHMub3JhY2xlLWRhdGFiYXNlLW9wZXJhdG9yLXN5c3RlbSAxHDAa 
+BgNVBAMME2xvY2FsaG9zdCAgUm9vdCBDQSAwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQCmnGVApwUBF1kpqcyr2nYeED0VKvefpoHLtxHSP+vP0lWhW7NU +NJlb1YuUagjJ4/rpGRQmPxcVU51n3aAW3a5qHazIpNxNa3fvgB1rMOPFxGmdel2d +8lIt+u19q19DknX/GNgH9Mog8RcyZyPeA7d2icT8TBo74ognr+8p68O3CjBHQ8EM +SnRQR7/bh1c10Uia317ilKvs+I7oErTq5JFLeIuPDdAJ6UncaeblTf1XJ/1FrpHG +fSS7xmR8x0/MblBQlku4eImYmN35g+eRgf8bLDDwC+GPzDnAqqMLjx6h2N+btDxr +tnn05qyqmN9G08uUlP4d4BXi9ISb/toYypklAgMBAAGjUzBRMB0GA1UdDgQWBBS+ +a4X2XTmdPivdQtqDWNpfOtHypDAfBgNVHSMEGDAWgBS+a4X2XTmdPivdQtqDWNpf +OtHypDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAZIrGBNdSw +pe+1agefHfaR8hjZQiXBxdwHM1gR2LWOaFzMS8Q/eRETHTO6+VwQ0/FNaXbAqgqk +G317gZMXS5ZmXuOi28fTpAQtuzokkEKpoK0puTnbXOKGA2QSbBlpSFPqb3aJXvVt +afXFQb5P/0mhr4kuVt7Ech82WM/o5ryFgObygDayDmLatTp+VaRmBZPksnSMhslq +3zPyS7bx2YhbPTLkDxq8Mfr/Msxme8LvSXUpFf4PpQ5zwp1RE32gekct6eRQLmqU +5LXY2aPtqpMF0fBpcwPWbqA9gOYCRKcvXXIr+u1x8hf6Er6grZegHkM9TQ8s0hJd +sxi5tK0lPMHJ +-----END CERTIFICATE----- diff --git a/docs/multitenant/ords-based/usecase01/ca.key b/docs/multitenant/ords-based/usecase01/ca.key new file mode 100644 index 00000000..1a0ef89d --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAppxlQKcFARdZKanMq9p2HhA9FSr3n6aBy7cR0j/rz9JVoVuz +VDSZW9WLlGoIyeP66RkUJj8XFVOdZ92gFt2uah2syKTcTWt374AdazDjxcRpnXpd +nfJSLfrtfatfQ5J1/xjYB/TKIPEXMmcj3gO3donE/EwaO+KIJ6/vKevDtwowR0PB +DEp0UEe/24dXNdFImt9e4pSr7PiO6BK06uSRS3iLjw3QCelJ3Gnm5U39Vyf9Ra6R +xn0ku8ZkfMdPzG5QUJZLuHiJmJjd+YPnkYH/Gyww8Avhj8w5wKqjC48eodjfm7Q8 +a7Z59OasqpjfRtPLlJT+HeAV4vSEm/7aGMqZJQIDAQABAoIBAGXRGYdjCgnarOBr +Jeq3vIsuvUVcVqs35AYMQFXOPltoXHAZTAPfiQC4BW6TRf+q1MDyVH/y+jZMPNsm +cxjGLDopHFgZd4/QZyDzmAbTf75yA2D7UI6fcV0sBUpRGgx/SqC0HADwtT1gWB6z +LRYWC13jX4AXOcjy7OXj/DIQJDCMivedt3dv0rDWJUcBCnVot5tr6zjycefxGKa8 +mG9LZQb3x71FxwpFUau3WLDSwOjtXCeMytaGXnGmIiofJmXnFi0KA4ApzKL7QV6I +cCBS1WBLLXeVM9vOfrtzKVLWGe0qADyLm35p5Fnl3j+vimkk8h/2DEvCZ75c987m 
+O3PEgdkCgYEA0Scg+KINTA78sdZL5v2+8fT4b+EfoCgUqfr10ReUPKrz3HfrVHcj +7Vf00RT52TkfmkL3mIdLyBUzQ9vzPgweo1o4yKCKNCpR9G3ydNW+KI5jSYnq2efz +Gpe3wTt+8YoyCgm9eUxNWjfO9fipS91sSotY0PovkBohj9aezfcWp1sCgYEAy+3n +MIvW/9PoYxCvQ9fDGLvx3B4/uy0ZYPh7j5edDuaRzwFd2YXUysXhJVuqTp0KT2tv +dRPFRE9Oq5N8e5ITIUiKLQ5PIRNBZm8CiAof+XS1fIuU+MTDaTfXwyGQo0xSg8MB +ITnJulmUlkcTWEtGyBi9sIjor5ve8kqvyrdAKX8CgYA9ZUUSd0978jJPad6iEf6J +PCXpgaYs91cJhre+BzPmkzA+mZ0lEEwlkdo1vfiRwWj7eYkA50Zhl4eS9e/zWM9t +mEBu9GFdasbf/55amZvWf+W5YpjkGmiMd9jjCjn7YVvLAozyHGngf91q6vGXaYou +X7VUsvxfSqxrcs7vGwc1XQKBgB0qaD80MMqj5v+MGlTsndWCw8OEe/7sI04QG7Pc +rjS8Wyws+NwsXNOnW1z5cDEQGrJjHiyzaCot4YV+cXZG3P+MnV52RnDnjRn2VHla +YVpPC8nFOMgfdAcvWmdo/IOuXbrEf/vdhPFm8G5Ruf2NvpDNoQuHeSfsdgVXEy89 +6CpHAoGBAMZInYD0XjcnZNqiQnQdcIJN3CqDIU76Z45OOpcUrYrvTos2xhGLrRI5 +qrk5Od/sovJfse+oUIIbgsABieqtyfxM03iu8fvbahIY6Un1iw2KN9t+mcPrSZJK +jTXKf7XxZ1+yN9kvohdLc65ySyXFSm++glDq8WGrmnOtLUlr0oMm +-----END RSA PRIVATE KEY----- diff --git a/docs/multitenant/ords-based/usecase01/ca.srl b/docs/multitenant/ords-based/usecase01/ca.srl new file mode 100644 index 00000000..7c9868bb --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/ca.srl @@ -0,0 +1 @@ +77D97AB4C4B6D5A9377B84B455D3E16348C6DE04 diff --git a/docs/multitenant/ords-based/usecase01/cdb_create.yaml b/docs/multitenant/ords-based/usecase01/cdb_create.yaml new file mode 100644 index 00000000..01fc0a18 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/cdb_create.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" + ordsImagePullPolicy: "Always" + dbTnsurl : "...Container tns alias....." 
+ replicas: 1 + sysAdminPwd: + secret: + secretName: "cdb1-secret" + key: "sysadmin_pwd" + ordsPwd: + secret: + secretName: "cdb1-secret" + key: "ords_pwd" + cdbAdminUser: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_user" + cdbAdminPwd: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_pwd" + webServerUser: + secret: + secretName: "cdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "cdb1-secret" + key: "webserver_pwd" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + diff --git a/docs/multitenant/ords-based/usecase01/cdb_secret.yaml b/docs/multitenant/ords-based/usecase01/cdb_secret.yaml new file mode 100644 index 00000000..567b90a4 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/cdb_secret.yaml @@ -0,0 +1,17 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: cdb1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + ords_pwd: ".....base64 encoded password...." + sysadmin_pwd: ".....base64 encoded password...." + cdbadmin_user: ".....base64 encoded password...." + cdbadmin_pwd: ".....base64 encoded password...." + webserver_user: ".....base64 encoded password...." + webserver_pwd: ".....base64 encoded password...." 
diff --git a/docs/multitenant/ords-based/usecase01/clone_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/clone_pdb1_resource.yaml new file mode 100644 index 00000000..3cc2c3dd --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/clone_pdb1_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/clone_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/clone_pdb2_resource.yaml new file mode 100644 index 00000000..28a4eab6 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/clone_pdb2_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb4 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: 
"e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/close_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/close_pdb1_resource.yaml new file mode 100644 index 00000000..a5c3cf59 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/close_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/close_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/close_pdb2_resource.yaml new file mode 100644 index 00000000..7fa15111 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/close_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + 
namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/close_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase01/close_pdb3_resource.yaml new file mode 100644 index 00000000..fa7cf009 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/close_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git 
a/docs/multitenant/ords-based/usecase01/create_ords_pod.yaml b/docs/multitenant/ords-based/usecase01/create_ords_pod.yaml new file mode 100644 index 00000000..e39c4c56 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/create_ords_pod.yaml @@ -0,0 +1,48 @@ +apiVersion: database.oracle.com/v4 +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: _your_container_registry/ords-dboper:latest + ordsImagePullPolicy: "Always" + dbTnsurl : "T H I S I S J U S T A N E X A M P L E ....(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + replicas: 1 + deletePdbCascade: true + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/create_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/create_pdb1_resource.yaml new file mode 100644 index 00000000..044d466b --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/create_pdb1_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: 
"cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/create_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/create_pdb2_resource.yaml new file mode 100644 index 00000000..eb36aaa2 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/create_pdb2_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + 
secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/delete_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/delete_pdb1_resource.yaml new file mode 100644 index 00000000..b0816929 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/delete_pdb1_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/delete_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/delete_pdb2_resource.yaml new file mode 100644 index 00000000..d2ad95cc --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/delete_pdb2_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + 
pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/extfile.txt b/docs/multitenant/ords-based/usecase01/extfile.txt new file mode 100644 index 00000000..c51d22a3 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/extfile.txt @@ -0,0 +1 @@ +subjectAltName=DNS:cdb-dev-ords.oracle-database-operator-system,DNS:www.example.com diff --git a/docs/multitenant/ords-based/usecase01/logfiles/BuildImage.log b/docs/multitenant/ords-based/usecase01/logfiles/BuildImage.log new file mode 100644 index 00000000..f35c66d8 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/BuildImage.log @@ -0,0 +1,896 @@ +/usr/bin/docker build -t oracle/ords-dboper:latest ../../../ords +Sending build context to Docker daemon 13.82kB +Step 1/12 : FROM container-registry.oracle.com/java/jdk:latest + ---> b8457e2f0b73 +Step 2/12 : ENV ORDS_HOME=/opt/oracle/ords/ RUN_FILE="runOrdsSSL.sh" ORDSVERSION=23.4.0-8 + ---> Using cache + ---> 3317a16cd6f8 +Step 3/12 : COPY $RUN_FILE $ORDS_HOME + ---> 7995edec33cc +Step 4/12 : RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && yum -y install java-11-openjdk-devel && yum -y install iproute && yum clean all + ---> Running in fe168b01f3ad +Oracle Linux 8 BaseOS Latest (x86_64) 91 MB/s | 79 MB 00:00 +Oracle Linux 8 Application Stream (x86_64) 69 MB/s | 62 MB 00:00 +Last metadata expiration check: 0:00:12 ago on Tue 20 Aug 2024 08:54:50 AM UTC. +Package yum-utils-4.0.21-23.0.1.el8.noarch is already installed. +Package tar-2:1.30-9.el8.x86_64 is already installed. 
+Package vim-minimal-2:8.0.1763-19.0.1.el8_6.4.x86_64 is already installed. +Package procps-ng-3.3.15-14.0.1.el8.x86_64 is already installed. +Package curl-7.61.1-33.el8_9.5.x86_64 is already installed. +Dependencies resolved. +================================================================================ + Package Arch Version Repository Size +================================================================================ +Installing: + bind-utils x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 453 k + expect x86_64 5.45.4-5.el8 ol8_baseos_latest 266 k + hostname x86_64 3.20-6.el8 ol8_baseos_latest 32 k + lsof x86_64 4.93.2-1.el8 ol8_baseos_latest 253 k + net-tools x86_64 2.0-0.52.20160912git.el8 ol8_baseos_latest 322 k + openssl x86_64 1:1.1.1k-12.el8_9 ol8_baseos_latest 710 k + sudo x86_64 1.9.5p2-1.el8_9 ol8_baseos_latest 1.0 M + tree x86_64 1.7.0-15.el8 ol8_baseos_latest 59 k + unzip x86_64 6.0-46.0.1.el8 ol8_baseos_latest 196 k + wget x86_64 1.19.5-12.0.1.el8_10 ol8_appstream 733 k + which x86_64 2.21-20.el8 ol8_baseos_latest 50 k + zip x86_64 3.0-23.el8 ol8_baseos_latest 270 k +Upgrading: + curl x86_64 7.61.1-34.el8 ol8_baseos_latest 352 k + dnf-plugins-core noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 76 k + libcurl x86_64 7.61.1-34.el8 ol8_baseos_latest 303 k + python3-dnf-plugins-core + noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 263 k + yum-utils noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 75 k +Installing dependencies: + bind-libs x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 176 k + bind-libs-lite x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 1.2 M + bind-license noarch 32:9.11.36-16.el8_10.2 ol8_appstream 104 k + fstrm x86_64 0.6.1-3.el8 ol8_appstream 29 k + libmaxminddb x86_64 1.2.0-10.el8_9.1 ol8_appstream 32 k + libmetalink x86_64 0.1.3-7.el8 ol8_baseos_latest 32 k + protobuf-c x86_64 1.3.0-8.el8 ol8_appstream 37 k + python3-bind noarch 32:9.11.36-16.el8_10.2 ol8_appstream 151 k + python3-ply noarch 3.9-9.el8 ol8_baseos_latest 111 k + tcl x86_64 
1:8.6.8-2.el8 ol8_baseos_latest 1.1 M +Installing weak dependencies: + geolite2-city noarch 20180605-1.el8 ol8_appstream 19 M + geolite2-country noarch 20180605-1.el8 ol8_appstream 1.0 M + +Transaction Summary +================================================================================ +Install 24 Packages +Upgrade 5 Packages + +Total download size: 28 M +Downloading Packages: +(1/29): hostname-3.20-6.el8.x86_64.rpm 268 kB/s | 32 kB 00:00 +(2/29): libmetalink-0.1.3-7.el8.x86_64.rpm 257 kB/s | 32 kB 00:00 +(3/29): expect-5.45.4-5.el8.x86_64.rpm 1.4 MB/s | 266 kB 00:00 +(4/29): lsof-4.93.2-1.el8.x86_64.rpm 3.2 MB/s | 253 kB 00:00 +(5/29): net-tools-2.0-0.52.20160912git.el8.x86_ 3.6 MB/s | 322 kB 00:00 +(6/29): python3-ply-3.9-9.el8.noarch.rpm 2.7 MB/s | 111 kB 00:00 +(7/29): openssl-1.1.1k-12.el8_9.x86_64.rpm 10 MB/s | 710 kB 00:00 +(8/29): tree-1.7.0-15.el8.x86_64.rpm 2.2 MB/s | 59 kB 00:00 +(9/29): sudo-1.9.5p2-1.el8_9.x86_64.rpm 14 MB/s | 1.0 MB 00:00 +(10/29): unzip-6.0-46.0.1.el8.x86_64.rpm 6.8 MB/s | 196 kB 00:00 +(11/29): which-2.21-20.el8.x86_64.rpm 2.0 MB/s | 50 kB 00:00 +(12/29): tcl-8.6.8-2.el8.x86_64.rpm 13 MB/s | 1.1 MB 00:00 +(13/29): bind-libs-9.11.36-16.el8_10.2.x86_64.r 6.7 MB/s | 176 kB 00:00 +(14/29): zip-3.0-23.el8.x86_64.rpm 8.4 MB/s | 270 kB 00:00 +(15/29): bind-libs-lite-9.11.36-16.el8_10.2.x86 29 MB/s | 1.2 MB 00:00 +(16/29): bind-license-9.11.36-16.el8_10.2.noarc 3.3 MB/s | 104 kB 00:00 +(17/29): bind-utils-9.11.36-16.el8_10.2.x86_64. 
13 MB/s | 453 kB 00:00 +(18/29): fstrm-0.6.1-3.el8.x86_64.rpm 1.2 MB/s | 29 kB 00:00 +(19/29): libmaxminddb-1.2.0-10.el8_9.1.x86_64.r 1.3 MB/s | 32 kB 00:00 +(20/29): geolite2-country-20180605-1.el8.noarch 17 MB/s | 1.0 MB 00:00 +(21/29): protobuf-c-1.3.0-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(22/29): python3-bind-9.11.36-16.el8_10.2.noarc 5.8 MB/s | 151 kB 00:00 +(23/29): wget-1.19.5-12.0.1.el8_10.x86_64.rpm 17 MB/s | 733 kB 00:00 +(24/29): curl-7.61.1-34.el8.x86_64.rpm 12 MB/s | 352 kB 00:00 +(25/29): dnf-plugins-core-4.0.21-25.0.1.el8.noa 2.4 MB/s | 76 kB 00:00 +(26/29): libcurl-7.61.1-34.el8.x86_64.rpm 8.6 MB/s | 303 kB 00:00 +(27/29): python3-dnf-plugins-core-4.0.21-25.0.1 9.8 MB/s | 263 kB 00:00 +(28/29): yum-utils-4.0.21-25.0.1.el8.noarch.rpm 3.0 MB/s | 75 kB 00:00 +(29/29): geolite2-city-20180605-1.el8.noarch.rp 66 MB/s | 19 MB 00:00 +-------------------------------------------------------------------------------- +Total 43 MB/s | 28 MB 00:00 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. 
+Running transaction + Preparing : 1/1 + Running scriptlet: protobuf-c-1.3.0-8.el8.x86_64 1/1 + Installing : protobuf-c-1.3.0-8.el8.x86_64 1/34 + Installing : fstrm-0.6.1-3.el8.x86_64 2/34 + Installing : bind-license-32:9.11.36-16.el8_10.2.noarch 3/34 + Upgrading : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 4/34 + Upgrading : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 5/34 + Upgrading : libcurl-7.61.1-34.el8.x86_64 6/34 + Installing : geolite2-country-20180605-1.el8.noarch 7/34 + Installing : geolite2-city-20180605-1.el8.noarch 8/34 + Installing : libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Running scriptlet: libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Installing : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 10/34 + Installing : bind-libs-32:9.11.36-16.el8_10.2.x86_64 11/34 + Installing : unzip-6.0-46.0.1.el8.x86_64 12/34 + Installing : tcl-1:8.6.8-2.el8.x86_64 13/34 + Running scriptlet: tcl-1:8.6.8-2.el8.x86_64 13/34 + Installing : python3-ply-3.9-9.el8.noarch 14/34 + Installing : python3-bind-32:9.11.36-16.el8_10.2.noarch 15/34 + Installing : libmetalink-0.1.3-7.el8.x86_64 16/34 + Installing : wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Running scriptlet: wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Installing : bind-utils-32:9.11.36-16.el8_10.2.x86_64 18/34 + Installing : expect-5.45.4-5.el8.x86_64 19/34 + Installing : zip-3.0-23.el8.x86_64 20/34 + Upgrading : curl-7.61.1-34.el8.x86_64 21/34 + Upgrading : yum-utils-4.0.21-25.0.1.el8.noarch 22/34 + Installing : which-2.21-20.el8.x86_64 23/34 + Installing : tree-1.7.0-15.el8.x86_64 24/34 + Installing : sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Running scriptlet: sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Installing : openssl-1:1.1.1k-12.el8_9.x86_64 26/34 + Installing : net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Running scriptlet: net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Installing : lsof-4.93.2-1.el8.x86_64 28/34 + Installing : hostname-3.20-6.el8.x86_64 29/34 + Running scriptlet: hostname-3.20-6.el8.x86_64 
29/34 + Cleanup : curl-7.61.1-33.el8_9.5.x86_64 30/34 + Cleanup : yum-utils-4.0.21-23.0.1.el8.noarch 31/34 + Cleanup : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Cleanup : python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 33/34 + Cleanup : libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Running scriptlet: libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Verifying : expect-5.45.4-5.el8.x86_64 1/34 + Verifying : hostname-3.20-6.el8.x86_64 2/34 + Verifying : libmetalink-0.1.3-7.el8.x86_64 3/34 + Verifying : lsof-4.93.2-1.el8.x86_64 4/34 + Verifying : net-tools-2.0-0.52.20160912git.el8.x86_64 5/34 + Verifying : openssl-1:1.1.1k-12.el8_9.x86_64 6/34 + Verifying : python3-ply-3.9-9.el8.noarch 7/34 + Verifying : sudo-1.9.5p2-1.el8_9.x86_64 8/34 + Verifying : tcl-1:8.6.8-2.el8.x86_64 9/34 + Verifying : tree-1.7.0-15.el8.x86_64 10/34 + Verifying : unzip-6.0-46.0.1.el8.x86_64 11/34 + Verifying : which-2.21-20.el8.x86_64 12/34 + Verifying : zip-3.0-23.el8.x86_64 13/34 + Verifying : bind-libs-32:9.11.36-16.el8_10.2.x86_64 14/34 + Verifying : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 15/34 + Verifying : bind-license-32:9.11.36-16.el8_10.2.noarch 16/34 + Verifying : bind-utils-32:9.11.36-16.el8_10.2.x86_64 17/34 + Verifying : fstrm-0.6.1-3.el8.x86_64 18/34 + Verifying : geolite2-city-20180605-1.el8.noarch 19/34 + Verifying : geolite2-country-20180605-1.el8.noarch 20/34 + Verifying : libmaxminddb-1.2.0-10.el8_9.1.x86_64 21/34 + Verifying : protobuf-c-1.3.0-8.el8.x86_64 22/34 + Verifying : python3-bind-32:9.11.36-16.el8_10.2.noarch 23/34 + Verifying : wget-1.19.5-12.0.1.el8_10.x86_64 24/34 + Verifying : curl-7.61.1-34.el8.x86_64 25/34 + Verifying : curl-7.61.1-33.el8_9.5.x86_64 26/34 + Verifying : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 27/34 + Verifying : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 28/34 + Verifying : libcurl-7.61.1-34.el8.x86_64 29/34 + Verifying : libcurl-7.61.1-33.el8_9.5.x86_64 30/34 + Verifying : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 31/34 + Verifying 
: python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Verifying : yum-utils-4.0.21-25.0.1.el8.noarch 33/34 + Verifying : yum-utils-4.0.21-23.0.1.el8.noarch 34/34 + +Upgraded: + curl-7.61.1-34.el8.x86_64 + dnf-plugins-core-4.0.21-25.0.1.el8.noarch + libcurl-7.61.1-34.el8.x86_64 + python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch + yum-utils-4.0.21-25.0.1.el8.noarch +Installed: + bind-libs-32:9.11.36-16.el8_10.2.x86_64 + bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 + bind-license-32:9.11.36-16.el8_10.2.noarch + bind-utils-32:9.11.36-16.el8_10.2.x86_64 + expect-5.45.4-5.el8.x86_64 + fstrm-0.6.1-3.el8.x86_64 + geolite2-city-20180605-1.el8.noarch + geolite2-country-20180605-1.el8.noarch + hostname-3.20-6.el8.x86_64 + libmaxminddb-1.2.0-10.el8_9.1.x86_64 + libmetalink-0.1.3-7.el8.x86_64 + lsof-4.93.2-1.el8.x86_64 + net-tools-2.0-0.52.20160912git.el8.x86_64 + openssl-1:1.1.1k-12.el8_9.x86_64 + protobuf-c-1.3.0-8.el8.x86_64 + python3-bind-32:9.11.36-16.el8_10.2.noarch + python3-ply-3.9-9.el8.noarch + sudo-1.9.5p2-1.el8_9.x86_64 + tcl-1:8.6.8-2.el8.x86_64 + tree-1.7.0-15.el8.x86_64 + unzip-6.0-46.0.1.el8.x86_64 + wget-1.19.5-12.0.1.el8_10.x86_64 + which-2.21-20.el8.x86_64 + zip-3.0-23.el8.x86_64 + +Complete! +Adding repo from: http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 +created by dnf config-manager from http://yum.o 496 kB/s | 139 kB 00:00 +Last metadata expiration check: 0:00:01 ago on Tue 20 Aug 2024 08:55:14 AM UTC. +Dependencies resolved. 
+============================================================================================== + Package Arch Version Repository Size +============================================================================================== +Installing: + java-11-openjdk-devel x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 3.4 M +Installing dependencies: + adwaita-cursor-theme noarch 3.28.0-3.el8 ol8_appstream 647 k + adwaita-icon-theme noarch 3.28.0-3.el8 ol8_appstream 11 M + alsa-lib x86_64 1.2.10-2.el8 ol8_appstream 500 k + at-spi2-atk x86_64 2.26.2-1.el8 ol8_appstream 89 k + at-spi2-core x86_64 2.28.0-1.el8 ol8_appstream 169 k + atk x86_64 2.28.1-1.el8 ol8_appstream 272 k + avahi-libs x86_64 0.7-27.el8 ol8_baseos_latest 61 k + cairo x86_64 1.15.12-6.el8 ol8_appstream 719 k + cairo-gobject x86_64 1.15.12-6.el8 ol8_appstream 33 k + colord-libs x86_64 1.4.2-1.el8 ol8_appstream 236 k + copy-jdk-configs noarch 4.0-2.el8 ol8_appstream 30 k + cpio x86_64 2.12-11.el8 ol8_baseos_latest 266 k + crypto-policies-scripts noarch 20230731-1.git3177e06.el8 ol8_baseos_latest 84 k + cups-libs x86_64 1:2.2.6-60.el8_10 ol8_baseos_latest 435 k + dracut x86_64 049-233.git20240115.0.1.el8 ol8_baseos_latest 382 k + file x86_64 5.33-25.el8 ol8_baseos_latest 77 k + fribidi x86_64 1.0.4-9.el8 ol8_appstream 89 k + gdk-pixbuf2 x86_64 2.36.12-6.el8_10 ol8_baseos_latest 465 k + gdk-pixbuf2-modules x86_64 2.36.12-6.el8_10 ol8_appstream 108 k + gettext x86_64 0.19.8.1-17.el8 ol8_baseos_latest 1.1 M + gettext-libs x86_64 0.19.8.1-17.el8 ol8_baseos_latest 312 k + glib-networking x86_64 2.56.1-1.1.el8 ol8_baseos_latest 155 k + graphite2 x86_64 1.3.10-10.el8 ol8_appstream 122 k + grub2-common noarch 1:2.02-156.0.2.el8 ol8_baseos_latest 897 k + grub2-tools x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 2.0 M + grub2-tools-minimal x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 215 k + gsettings-desktop-schemas x86_64 3.32.0-6.el8 ol8_baseos_latest 633 k + gtk-update-icon-cache x86_64 3.22.30-11.el8 ol8_appstream 32 k 
+ harfbuzz x86_64 1.7.5-4.el8 ol8_appstream 295 k + hicolor-icon-theme noarch 0.17-2.el8 ol8_appstream 48 k + jasper-libs x86_64 2.0.14-5.el8 ol8_appstream 167 k + java-11-openjdk x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 475 k + java-11-openjdk-headless x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 42 M + javapackages-filesystem noarch 5.3.0-1.module+el8+5136+7ff78f74 ol8_appstream 30 k + jbigkit-libs x86_64 2.1-14.el8 ol8_appstream 55 k + json-glib x86_64 1.4.4-1.el8 ol8_baseos_latest 144 k + kbd-legacy noarch 2.0.4-11.el8 ol8_baseos_latest 481 k + kbd-misc noarch 2.0.4-11.el8 ol8_baseos_latest 1.5 M + lcms2 x86_64 2.9-2.el8 ol8_appstream 164 k + libX11 x86_64 1.6.8-8.el8 ol8_appstream 611 k + libX11-common noarch 1.6.8-8.el8 ol8_appstream 157 k + libXau x86_64 1.0.9-3.el8 ol8_appstream 37 k + libXcomposite x86_64 0.4.4-14.el8 ol8_appstream 28 k + libXcursor x86_64 1.1.15-3.el8 ol8_appstream 36 k + libXdamage x86_64 1.1.4-14.el8 ol8_appstream 27 k + libXext x86_64 1.3.4-1.el8 ol8_appstream 45 k + libXfixes x86_64 5.0.3-7.el8 ol8_appstream 25 k + libXft x86_64 2.3.3-1.el8 ol8_appstream 67 k + libXi x86_64 1.7.10-1.el8 ol8_appstream 49 k + libXinerama x86_64 1.1.4-1.el8 ol8_appstream 15 k + libXrandr x86_64 1.5.2-1.el8 ol8_appstream 34 k + libXrender x86_64 0.9.10-7.el8 ol8_appstream 33 k + libXtst x86_64 1.2.3-7.el8 ol8_appstream 22 k + libcroco x86_64 0.6.12-4.el8_2.1 ol8_baseos_latest 113 k + libdatrie x86_64 0.2.9-7.el8 ol8_appstream 33 k + libepoxy x86_64 1.5.8-1.el8 ol8_appstream 225 k + libfontenc x86_64 1.1.3-8.el8 ol8_appstream 37 k + libgomp x86_64 8.5.0-22.0.1.el8_10 ol8_baseos_latest 218 k + libgusb x86_64 0.3.0-1.el8 ol8_baseos_latest 49 k + libjpeg-turbo x86_64 1.5.3-12.el8 ol8_appstream 157 k + libkcapi x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 52 k + libkcapi-hmaccalc x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 31 k + libmodman x86_64 2.0.1-17.el8 ol8_baseos_latest 36 k + libpkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 35 k + libproxy x86_64 
0.4.15-5.2.el8 ol8_baseos_latest 75 k + libsoup x86_64 2.62.3-5.el8 ol8_baseos_latest 424 k + libthai x86_64 0.1.27-2.el8 ol8_appstream 203 k + libtiff x86_64 4.0.9-32.el8_10 ol8_appstream 189 k + libwayland-client x86_64 1.21.0-1.el8 ol8_appstream 41 k + libwayland-cursor x86_64 1.21.0-1.el8 ol8_appstream 26 k + libwayland-egl x86_64 1.21.0-1.el8 ol8_appstream 19 k + libxcb x86_64 1.13.1-1.el8 ol8_appstream 231 k + libxkbcommon x86_64 0.9.1-1.el8 ol8_appstream 116 k + lksctp-tools x86_64 1.0.18-3.el8 ol8_baseos_latest 100 k + lua x86_64 5.3.4-12.el8 ol8_appstream 192 k + nspr x86_64 4.35.0-1.el8_8 ol8_appstream 143 k + nss x86_64 3.90.0-7.el8_10 ol8_appstream 750 k + nss-softokn x86_64 3.90.0-7.el8_10 ol8_appstream 1.2 M + nss-softokn-freebl x86_64 3.90.0-7.el8_10 ol8_appstream 375 k + nss-sysinit x86_64 3.90.0-7.el8_10 ol8_appstream 74 k + nss-util x86_64 3.90.0-7.el8_10 ol8_appstream 139 k + os-prober x86_64 1.74-9.0.1.el8 ol8_baseos_latest 51 k + pango x86_64 1.42.4-8.el8 ol8_appstream 297 k + pixman x86_64 0.38.4-4.el8 ol8_appstream 256 k + pkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 38 k + pkgconf-m4 noarch 1.4.2-1.el8 ol8_baseos_latest 17 k + pkgconf-pkg-config x86_64 1.4.2-1.el8 ol8_baseos_latest 15 k + rest x86_64 0.8.1-2.el8 ol8_appstream 70 k + shared-mime-info x86_64 1.9-4.el8 ol8_baseos_latest 328 k + systemd-udev x86_64 239-78.0.4.el8 ol8_baseos_latest 1.6 M + ttmkfdir x86_64 3.0.9-54.el8 ol8_appstream 62 k + tzdata-java noarch 2024a-1.0.1.el8 ol8_appstream 186 k + xkeyboard-config noarch 2.28-1.el8 ol8_appstream 782 k + xorg-x11-font-utils x86_64 1:7.5-41.el8 ol8_appstream 104 k + xorg-x11-fonts-Type1 noarch 7.5-19.el8 ol8_appstream 522 k + xz x86_64 5.2.4-4.el8_6 ol8_baseos_latest 153 k +Installing weak dependencies: + abattis-cantarell-fonts noarch 0.0.25-6.el8 ol8_appstream 155 k + dconf x86_64 0.28.0-4.0.1.el8 ol8_appstream 108 k + dejavu-sans-mono-fonts noarch 2.35-7.el8 ol8_baseos_latest 447 k + grubby x86_64 8.40-49.0.2.el8 ol8_baseos_latest 
50 k + gtk3 x86_64 3.22.30-11.el8 ol8_appstream 4.5 M + hardlink x86_64 1:1.3-6.el8 ol8_baseos_latest 29 k + kbd x86_64 2.0.4-11.el8 ol8_baseos_latest 390 k + memstrack x86_64 0.2.5-2.el8 ol8_baseos_latest 51 k + pigz x86_64 2.4-4.el8 ol8_baseos_latest 80 k +Enabling module streams: + javapackages-runtime 201801 + +Transaction Summary +============================================================================================== +Install 106 Packages + +Total download size: 86 M +Installed size: 312 M +Downloading Packages: +(1/106): crypto-policies-scripts-20230731-1.git 862 kB/s | 84 kB 00:00 +(2/106): avahi-libs-0.7-27.el8.x86_64.rpm 602 kB/s | 61 kB 00:00 +(3/106): cpio-2.12-11.el8.x86_64.rpm 1.8 MB/s | 266 kB 00:00 +(4/106): cups-libs-2.2.6-60.el8_10.x86_64.rpm 5.7 MB/s | 435 kB 00:00 +(5/106): dejavu-sans-mono-fonts-2.35-7.el8.noar 5.1 MB/s | 447 kB 00:00 +(6/106): dracut-049-233.git20240115.0.1.el8.x86 7.0 MB/s | 382 kB 00:00 +(7/106): gdk-pixbuf2-2.36.12-6.el8_10.x86_64.rp 12 MB/s | 465 kB 00:00 +(8/106): gettext-libs-0.19.8.1-17.el8.x86_64.rp 9.3 MB/s | 312 kB 00:00 +(9/106): gettext-0.19.8.1-17.el8.x86_64.rpm 16 MB/s | 1.1 MB 00:00 +(10/106): glib-networking-2.56.1-1.1.el8.x86_64 6.0 MB/s | 155 kB 00:00 +(11/106): grub2-common-2.02-156.0.2.el8.noarch. 26 MB/s | 897 kB 00:00 +(12/106): grub2-tools-minimal-2.02-156.0.2.el8. 
8.2 MB/s | 215 kB 00:00 +(13/106): grubby-8.40-49.0.2.el8.x86_64.rpm 2.1 MB/s | 50 kB 00:00 +(14/106): grub2-tools-2.02-156.0.2.el8.x86_64.r 26 MB/s | 2.0 MB 00:00 +(15/106): gsettings-desktop-schemas-3.32.0-6.el 19 MB/s | 633 kB 00:00 +(16/106): hardlink-1.3-6.el8.x86_64.rpm 1.1 MB/s | 29 kB 00:00 +(17/106): json-glib-1.4.4-1.el8.x86_64.rpm 5.9 MB/s | 144 kB 00:00 +(18/106): kbd-2.0.4-11.el8.x86_64.rpm 14 MB/s | 390 kB 00:00 +(19/106): kbd-legacy-2.0.4-11.el8.noarch.rpm 17 MB/s | 481 kB 00:00 +(20/106): kbd-misc-2.0.4-11.el8.noarch.rpm 41 MB/s | 1.5 MB 00:00 +(21/106): libcroco-0.6.12-4.el8_2.1.x86_64.rpm 4.7 MB/s | 113 kB 00:00 +(22/106): libgomp-8.5.0-22.0.1.el8_10.x86_64.rp 9.1 MB/s | 218 kB 00:00 +(23/106): libgusb-0.3.0-1.el8.x86_64.rpm 2.1 MB/s | 49 kB 00:00 +(24/106): libkcapi-1.4.0-2.0.1.el8.x86_64.rpm 1.6 MB/s | 52 kB 00:00 +(25/106): libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86 822 kB/s | 31 kB 00:00 +(26/106): libmodman-2.0.1-17.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(27/106): libpkgconf-1.4.2-1.el8.x86_64.rpm 1.2 MB/s | 35 kB 00:00 +(28/106): libproxy-0.4.15-5.2.el8.x86_64.rpm 3.0 MB/s | 75 kB 00:00 +(29/106): libsoup-2.62.3-5.el8.x86_64.rpm 15 MB/s | 424 kB 00:00 +(30/106): lksctp-tools-1.0.18-3.el8.x86_64.rpm 3.5 MB/s | 100 kB 00:00 +(31/106): memstrack-0.2.5-2.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(32/106): os-prober-1.74-9.0.1.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(33/106): pigz-2.4-4.el8.x86_64.rpm 3.5 MB/s | 80 kB 00:00 +(34/106): pkgconf-1.4.2-1.el8.x86_64.rpm 1.7 MB/s | 38 kB 00:00 +(35/106): pkgconf-m4-1.4.2-1.el8.noarch.rpm 761 kB/s | 17 kB 00:00 +(36/106): pkgconf-pkg-config-1.4.2-1.el8.x86_64 691 kB/s | 15 kB 00:00 +(37/106): shared-mime-info-1.9-4.el8.x86_64.rpm 13 MB/s | 328 kB 00:00 +(38/106): systemd-udev-239-78.0.4.el8.x86_64.rp 32 MB/s | 1.6 MB 00:00 +(39/106): xz-5.2.4-4.el8_6.x86_64.rpm 5.2 MB/s | 153 kB 00:00 +(40/106): abattis-cantarell-fonts-0.0.25-6.el8. 
6.4 MB/s | 155 kB 00:00 +(41/106): adwaita-cursor-theme-3.28.0-3.el8.noa 22 MB/s | 647 kB 00:00 +(42/106): alsa-lib-1.2.10-2.el8.x86_64.rpm 18 MB/s | 500 kB 00:00 +(43/106): at-spi2-atk-2.26.2-1.el8.x86_64.rpm 3.8 MB/s | 89 kB 00:00 +(44/106): at-spi2-core-2.28.0-1.el8.x86_64.rpm 6.9 MB/s | 169 kB 00:00 +(45/106): atk-2.28.1-1.el8.x86_64.rpm 9.2 MB/s | 272 kB 00:00 +(46/106): cairo-1.15.12-6.el8.x86_64.rpm 24 MB/s | 719 kB 00:00 +(47/106): adwaita-icon-theme-3.28.0-3.el8.noarc 65 MB/s | 11 MB 00:00 +(48/106): cairo-gobject-1.15.12-6.el8.x86_64.rp 914 kB/s | 33 kB 00:00 +(49/106): colord-libs-1.4.2-1.el8.x86_64.rpm 9.5 MB/s | 236 kB 00:00 +(50/106): copy-jdk-configs-4.0-2.el8.noarch.rpm 1.1 MB/s | 30 kB 00:00 +(51/106): dconf-0.28.0-4.0.1.el8.x86_64.rpm 4.4 MB/s | 108 kB 00:00 +(52/106): fribidi-1.0.4-9.el8.x86_64.rpm 3.9 MB/s | 89 kB 00:00 +(53/106): graphite2-1.3.10-10.el8.x86_64.rpm 5.1 MB/s | 122 kB 00:00 +(54/106): gdk-pixbuf2-modules-2.36.12-6.el8_10. 3.6 MB/s | 108 kB 00:00 +(55/106): gtk-update-icon-cache-3.22.30-11.el8. 1.4 MB/s | 32 kB 00:00 +(56/106): harfbuzz-1.7.5-4.el8.x86_64.rpm 11 MB/s | 295 kB 00:00 +(57/106): gtk3-3.22.30-11.el8.x86_64.rpm 68 MB/s | 4.5 MB 00:00 +(58/106): hicolor-icon-theme-0.17-2.el8.noarch. 
2.1 MB/s | 48 kB 00:00 +(59/106): java-11-openjdk-11.0.24.0.8-3.0.1.el8 17 MB/s | 475 kB 00:00 +(60/106): jasper-libs-2.0.14-5.el8.x86_64.rpm 5.0 MB/s | 167 kB 00:00 +(61/106): java-11-openjdk-devel-11.0.24.0.8-3.0 61 MB/s | 3.4 MB 00:00 +(62/106): javapackages-filesystem-5.3.0-1.modul 1.2 MB/s | 30 kB 00:00 +(63/106): jbigkit-libs-2.1-14.el8.x86_64.rpm 2.1 MB/s | 55 kB 00:00 +(64/106): lcms2-2.9-2.el8.x86_64.rpm 3.8 MB/s | 164 kB 00:00 +(65/106): libX11-1.6.8-8.el8.x86_64.rpm 20 MB/s | 611 kB 00:00 +(66/106): libX11-common-1.6.8-8.el8.noarch.rpm 6.8 MB/s | 157 kB 00:00 +(67/106): libXau-1.0.9-3.el8.x86_64.rpm 1.6 MB/s | 37 kB 00:00 +(68/106): libXcomposite-0.4.4-14.el8.x86_64.rpm 1.3 MB/s | 28 kB 00:00 +(69/106): libXcursor-1.1.15-3.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(70/106): libXdamage-1.1.4-14.el8.x86_64.rpm 1.2 MB/s | 27 kB 00:00 +(71/106): libXext-1.3.4-1.el8.x86_64.rpm 2.0 MB/s | 45 kB 00:00 +(72/106): libXfixes-5.0.3-7.el8.x86_64.rpm 1.1 MB/s | 25 kB 00:00 +(73/106): libXft-2.3.3-1.el8.x86_64.rpm 2.9 MB/s | 67 kB 00:00 +(74/106): libXi-1.7.10-1.el8.x86_64.rpm 2.2 MB/s | 49 kB 00:00 +(75/106): libXinerama-1.1.4-1.el8.x86_64.rpm 717 kB/s | 15 kB 00:00 +(76/106): libXrandr-1.5.2-1.el8.x86_64.rpm 1.5 MB/s | 34 kB 00:00 +(77/106): libXrender-0.9.10-7.el8.x86_64.rpm 1.4 MB/s | 33 kB 00:00 +(78/106): libXtst-1.2.3-7.el8.x86_64.rpm 957 kB/s | 22 kB 00:00 +(79/106): java-11-openjdk-headless-11.0.24.0.8- 71 MB/s | 42 MB 00:00 +(80/106): libdatrie-0.2.9-7.el8.x86_64.rpm 274 kB/s | 33 kB 00:00 +(81/106): libepoxy-1.5.8-1.el8.x86_64.rpm 9.1 MB/s | 225 kB 00:00 +(82/106): libfontenc-1.1.3-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(83/106): libthai-0.1.27-2.el8.x86_64.rpm 8.2 MB/s | 203 kB 00:00 +(84/106): libjpeg-turbo-1.5.3-12.el8.x86_64.rpm 5.1 MB/s | 157 kB 00:00 +(85/106): libtiff-4.0.9-32.el8_10.x86_64.rpm 7.8 MB/s | 189 kB 00:00 +(86/106): libwayland-client-1.21.0-1.el8.x86_64 1.7 MB/s | 41 kB 00:00 +(87/106): libwayland-cursor-1.21.0-1.el8.x86_64 1.2 MB/s | 26 
kB 00:00 +(88/106): libwayland-egl-1.21.0-1.el8.x86_64.rp 801 kB/s | 19 kB 00:00 +(89/106): libxcb-1.13.1-1.el8.x86_64.rpm 9.7 MB/s | 231 kB 00:00 +(90/106): libxkbcommon-0.9.1-1.el8.x86_64.rpm 5.0 MB/s | 116 kB 00:00 +(91/106): nspr-4.35.0-1.el8_8.x86_64.rpm 6.0 MB/s | 143 kB 00:00 +(92/106): lua-5.3.4-12.el8.x86_64.rpm 5.9 MB/s | 192 kB 00:00 +(93/106): nss-softokn-3.90.0-7.el8_10.x86_64.rp 38 MB/s | 1.2 MB 00:00 +(94/106): nss-3.90.0-7.el8_10.x86_64.rpm 17 MB/s | 750 kB 00:00 +(95/106): nss-softokn-freebl-3.90.0-7.el8_10.x8 14 MB/s | 375 kB 00:00 +(96/106): nss-sysinit-3.90.0-7.el8_10.x86_64.rp 3.2 MB/s | 74 kB 00:00 +(97/106): nss-util-3.90.0-7.el8_10.x86_64.rpm 5.8 MB/s | 139 kB 00:00 +(98/106): pango-1.42.4-8.el8.x86_64.rpm 11 MB/s | 297 kB 00:00 +(99/106): pixman-0.38.4-4.el8.x86_64.rpm 10 MB/s | 256 kB 00:00 +(100/106): rest-0.8.1-2.el8.x86_64.rpm 3.1 MB/s | 70 kB 00:00 +(101/106): ttmkfdir-3.0.9-54.el8.x86_64.rpm 2.5 MB/s | 62 kB 00:00 +(102/106): tzdata-java-2024a-1.0.1.el8.noarch.r 7.4 MB/s | 186 kB 00:00 +(103/106): xkeyboard-config-2.28-1.el8.noarch.r 27 MB/s | 782 kB 00:00 +(104/106): xorg-x11-font-utils-7.5-41.el8.x86_6 3.9 MB/s | 104 kB 00:00 +(105/106): xorg-x11-fonts-Type1-7.5-19.el8.noar 1.3 MB/s | 522 kB 00:00 +(106/106): file-5.33-25.el8.x86_64.rpm 26 kB/s | 77 kB 00:02 +-------------------------------------------------------------------------------- +Total 27 MB/s | 86 MB 00:03 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. 
+Running transaction + Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 1/1 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86 1/1 + Preparing : 1/1 + Installing : nspr-4.35.0-1.el8_8.x86_64 1/106 + Running scriptlet: nspr-4.35.0-1.el8_8.x86_64 1/106 + Installing : nss-util-3.90.0-7.el8_10.x86_64 2/106 + Installing : libjpeg-turbo-1.5.3-12.el8.x86_64 3/106 + Installing : pixman-0.38.4-4.el8.x86_64 4/106 + Installing : libwayland-client-1.21.0-1.el8.x86_64 5/106 + Installing : atk-2.28.1-1.el8.x86_64 6/106 + Installing : libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Running scriptlet: libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Installing : libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Running scriptlet: libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Installing : grub2-common-1:2.02-156.0.2.el8.noarch 9/106 + Installing : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Installing : gettext-0.19.8.1-17.el8.x86_64 11/106 + Running scriptlet: gettext-0.19.8.1-17.el8.x86_64 11/106 + Installing : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 12/106 + Installing : libwayland-cursor-1.21.0-1.el8.x86_64 13/106 + Installing : jasper-libs-2.0.14-5.el8.x86_64 14/106 + Installing : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 15/106 + Installing : nss-softokn-3.90.0-7.el8_10.x86_64 16/106 + Installing : xkeyboard-config-2.28-1.el8.noarch 17/106 + Installing : libxkbcommon-0.9.1-1.el8.x86_64 18/106 + Installing : tzdata-java-2024a-1.0.1.el8.noarch 19/106 + Installing : ttmkfdir-3.0.9-54.el8.x86_64 20/106 + Installing : lua-5.3.4-12.el8.x86_64 21/106 + Installing : copy-jdk-configs-4.0-2.el8.noarch 22/106 + Installing : libwayland-egl-1.21.0-1.el8.x86_64 23/106 + Installing : libfontenc-1.1.3-8.el8.x86_64 24/106 + Installing : libepoxy-1.5.8-1.el8.x86_64 25/106 + Installing : libdatrie-0.2.9-7.el8.x86_64 26/106 + Running scriptlet: libdatrie-0.2.9-7.el8.x86_64 26/106 + Installing : libthai-0.1.27-2.el8.x86_64 27/106 + Running scriptlet: libthai-0.1.27-2.el8.x86_64 27/106 + 
Installing : libXau-1.0.9-3.el8.x86_64 28/106 + Installing : libxcb-1.13.1-1.el8.x86_64 29/106 + Installing : libX11-common-1.6.8-8.el8.noarch 30/106 + Installing : libX11-1.6.8-8.el8.x86_64 31/106 + Installing : libXext-1.3.4-1.el8.x86_64 32/106 + Installing : libXrender-0.9.10-7.el8.x86_64 33/106 + Installing : cairo-1.15.12-6.el8.x86_64 34/106 + Installing : libXi-1.7.10-1.el8.x86_64 35/106 + Installing : libXfixes-5.0.3-7.el8.x86_64 36/106 + Installing : libXtst-1.2.3-7.el8.x86_64 37/106 + Installing : libXcomposite-0.4.4-14.el8.x86_64 38/106 + Installing : at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Running scriptlet: at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Installing : at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Running scriptlet: at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Installing : libXcursor-1.1.15-3.el8.x86_64 41/106 + Installing : libXdamage-1.1.4-14.el8.x86_64 42/106 + Installing : cairo-gobject-1.15.12-6.el8.x86_64 43/106 + Installing : libXft-2.3.3-1.el8.x86_64 44/106 + Installing : libXrandr-1.5.2-1.el8.x86_64 45/106 + Installing : libXinerama-1.1.4-1.el8.x86_64 46/106 + Installing : lcms2-2.9-2.el8.x86_64 47/106 + Running scriptlet: lcms2-2.9-2.el8.x86_64 47/106 + Installing : jbigkit-libs-2.1-14.el8.x86_64 48/106 + Running scriptlet: jbigkit-libs-2.1-14.el8.x86_64 48/106 + Installing : libtiff-4.0.9-32.el8_10.x86_64 49/106 + Installing : javapackages-filesystem-5.3.0-1.module+el8+5136+ 50/106 + Installing : hicolor-icon-theme-0.17-2.el8.noarch 51/106 + Installing : graphite2-1.3.10-10.el8.x86_64 52/106 + Installing : harfbuzz-1.7.5-4.el8.x86_64 53/106 + Running scriptlet: harfbuzz-1.7.5-4.el8.x86_64 53/106 + Installing : fribidi-1.0.4-9.el8.x86_64 54/106 + Installing : pango-1.42.4-8.el8.x86_64 55/106 + Running scriptlet: pango-1.42.4-8.el8.x86_64 55/106 + Installing : dconf-0.28.0-4.0.1.el8.x86_64 56/106 + Installing : alsa-lib-1.2.10-2.el8.x86_64 57/106 + Running scriptlet: alsa-lib-1.2.10-2.el8.x86_64 57/106 + Installing : 
adwaita-cursor-theme-3.28.0-3.el8.noarch 58/106 + Installing : adwaita-icon-theme-3.28.0-3.el8.noarch 59/106 + Installing : abattis-cantarell-fonts-0.0.25-6.el8.noarch 60/106 + Installing : xz-5.2.4-4.el8_6.x86_64 61/106 + Installing : shared-mime-info-1.9-4.el8.x86_64 62/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 62/106 + Installing : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Installing : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 64/106 + Installing : gtk-update-icon-cache-3.22.30-11.el8.x86_64 65/106 + Installing : pkgconf-m4-1.4.2-1.el8.noarch 66/106 + Installing : pigz-2.4-4.el8.x86_64 67/106 + Installing : memstrack-0.2.5-2.el8.x86_64 68/106 + Installing : lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Running scriptlet: lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Installing : libpkgconf-1.4.2-1.el8.x86_64 70/106 + Installing : pkgconf-1.4.2-1.el8.x86_64 71/106 + Installing : pkgconf-pkg-config-1.4.2-1.el8.x86_64 72/106 + Installing : xorg-x11-font-utils-1:7.5-41.el8.x86_64 73/106 + Installing : xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Running scriptlet: xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Installing : libmodman-2.0.1-17.el8.x86_64 75/106 + Running scriptlet: libmodman-2.0.1-17.el8.x86_64 75/106 + Installing : libproxy-0.4.15-5.2.el8.x86_64 76/106 + Running scriptlet: libproxy-0.4.15-5.2.el8.x86_64 76/106 + Installing : libkcapi-1.4.0-2.0.1.el8.x86_64 77/106 + Installing : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 78/106 + Installing : libgusb-0.3.0-1.el8.x86_64 79/106 + Installing : colord-libs-1.4.2-1.el8.x86_64 80/106 + Installing : kbd-misc-2.0.4-11.el8.noarch 81/106 + Installing : kbd-legacy-2.0.4-11.el8.noarch 82/106 + Installing : kbd-2.0.4-11.el8.x86_64 83/106 + Installing : systemd-udev-239-78.0.4.el8.x86_64 84/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 84/106 + Installing : os-prober-1.74-9.0.1.el8.x86_64 85/106 + Installing : 
json-glib-1.4.4-1.el8.x86_64 86/106 + Installing : hardlink-1:1.3-6.el8.x86_64 87/106 + Installing : file-5.33-25.el8.x86_64 88/106 + Installing : dejavu-sans-mono-fonts-2.35-7.el8.noarch 89/106 + Installing : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 90/106 + Installing : glib-networking-2.56.1-1.1.el8.x86_64 91/106 + Installing : libsoup-2.62.3-5.el8.x86_64 92/106 + Installing : rest-0.8.1-2.el8.x86_64 93/106 + Running scriptlet: rest-0.8.1-2.el8.x86_64 93/106 + Installing : cpio-2.12-11.el8.x86_64 94/106 + Installing : dracut-049-233.git20240115.0.1.el8.x86_64 95/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grubby-8.40-49.0.2.el8.x86_64 97/106 + Installing : crypto-policies-scripts-20230731-1.git3177e06.el 98/106 + Installing : nss-sysinit-3.90.0-7.el8_10.x86_64 99/106 + Installing : nss-3.90.0-7.el8_10.x86_64 100/106 + Installing : avahi-libs-0.7-27.el8.x86_64 101/106 + Installing : cups-libs-1:2.2.6-60.el8_10.x86_64 102/106 + Installing : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Installing : gtk3-3.22.30-11.el8.x86_64 104/106 + Installing : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Running scriptlet: java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Installing : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 106/106 + Running scriptlet: dconf-0.28.0-4.0.1.el8.x86_64 106/106 + Running scriptlet: crypto-policies-scripts-20230731-1.git3177e06.el 106/106 + Running scriptlet: nss-3.90.0-7.el8_10.x86_64 106/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 106/106 + Running scriptlet: 
java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: hicolor-icon-theme-0.17-2.el8.noarch 106/106 + Running scriptlet: adwaita-icon-theme-3.28.0-3.el8.noarch 106/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 106/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 106/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 106/106 + Verifying : avahi-libs-0.7-27.el8.x86_64 1/106 + Verifying : cpio-2.12-11.el8.x86_64 2/106 + Verifying : crypto-policies-scripts-20230731-1.git3177e06.el 3/106 + Verifying : cups-libs-1:2.2.6-60.el8_10.x86_64 4/106 + Verifying : dejavu-sans-mono-fonts-2.35-7.el8.noarch 5/106 + Verifying : dracut-049-233.git20240115.0.1.el8.x86_64 6/106 + Verifying : file-5.33-25.el8.x86_64 7/106 + Verifying : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 8/106 + Verifying : gettext-0.19.8.1-17.el8.x86_64 9/106 + Verifying : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Verifying : glib-networking-2.56.1-1.1.el8.x86_64 11/106 + Verifying : grub2-common-1:2.02-156.0.2.el8.noarch 12/106 + Verifying : grub2-tools-1:2.02-156.0.2.el8.x86_64 13/106 + Verifying : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 14/106 + Verifying : grubby-8.40-49.0.2.el8.x86_64 15/106 + Verifying : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 16/106 + Verifying : hardlink-1:1.3-6.el8.x86_64 17/106 + Verifying : json-glib-1.4.4-1.el8.x86_64 18/106 + Verifying : kbd-2.0.4-11.el8.x86_64 19/106 + Verifying : kbd-legacy-2.0.4-11.el8.noarch 20/106 + Verifying : kbd-misc-2.0.4-11.el8.noarch 21/106 + Verifying : libcroco-0.6.12-4.el8_2.1.x86_64 22/106 + Verifying : libgomp-8.5.0-22.0.1.el8_10.x86_64 23/106 + Verifying : libgusb-0.3.0-1.el8.x86_64 24/106 + Verifying : libkcapi-1.4.0-2.0.1.el8.x86_64 25/106 + Verifying : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 26/106 + Verifying : libmodman-2.0.1-17.el8.x86_64 27/106 + Verifying : libpkgconf-1.4.2-1.el8.x86_64 28/106 + 
Verifying : libproxy-0.4.15-5.2.el8.x86_64 29/106 + Verifying : libsoup-2.62.3-5.el8.x86_64 30/106 + Verifying : lksctp-tools-1.0.18-3.el8.x86_64 31/106 + Verifying : memstrack-0.2.5-2.el8.x86_64 32/106 + Verifying : os-prober-1.74-9.0.1.el8.x86_64 33/106 + Verifying : pigz-2.4-4.el8.x86_64 34/106 + Verifying : pkgconf-1.4.2-1.el8.x86_64 35/106 + Verifying : pkgconf-m4-1.4.2-1.el8.noarch 36/106 + Verifying : pkgconf-pkg-config-1.4.2-1.el8.x86_64 37/106 + Verifying : shared-mime-info-1.9-4.el8.x86_64 38/106 + Verifying : systemd-udev-239-78.0.4.el8.x86_64 39/106 + Verifying : xz-5.2.4-4.el8_6.x86_64 40/106 + Verifying : abattis-cantarell-fonts-0.0.25-6.el8.noarch 41/106 + Verifying : adwaita-cursor-theme-3.28.0-3.el8.noarch 42/106 + Verifying : adwaita-icon-theme-3.28.0-3.el8.noarch 43/106 + Verifying : alsa-lib-1.2.10-2.el8.x86_64 44/106 + Verifying : at-spi2-atk-2.26.2-1.el8.x86_64 45/106 + Verifying : at-spi2-core-2.28.0-1.el8.x86_64 46/106 + Verifying : atk-2.28.1-1.el8.x86_64 47/106 + Verifying : cairo-1.15.12-6.el8.x86_64 48/106 + Verifying : cairo-gobject-1.15.12-6.el8.x86_64 49/106 + Verifying : colord-libs-1.4.2-1.el8.x86_64 50/106 + Verifying : copy-jdk-configs-4.0-2.el8.noarch 51/106 + Verifying : dconf-0.28.0-4.0.1.el8.x86_64 52/106 + Verifying : fribidi-1.0.4-9.el8.x86_64 53/106 + Verifying : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 54/106 + Verifying : graphite2-1.3.10-10.el8.x86_64 55/106 + Verifying : gtk-update-icon-cache-3.22.30-11.el8.x86_64 56/106 + Verifying : gtk3-3.22.30-11.el8.x86_64 57/106 + Verifying : harfbuzz-1.7.5-4.el8.x86_64 58/106 + Verifying : hicolor-icon-theme-0.17-2.el8.noarch 59/106 + Verifying : jasper-libs-2.0.14-5.el8.x86_64 60/106 + Verifying : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 61/106 + Verifying : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 62/106 + Verifying : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 63/106 + Verifying : javapackages-filesystem-5.3.0-1.module+el8+5136+ 64/106 + Verifying : 
jbigkit-libs-2.1-14.el8.x86_64 65/106 + Verifying : lcms2-2.9-2.el8.x86_64 66/106 + Verifying : libX11-1.6.8-8.el8.x86_64 67/106 + Verifying : libX11-common-1.6.8-8.el8.noarch 68/106 + Verifying : libXau-1.0.9-3.el8.x86_64 69/106 + Verifying : libXcomposite-0.4.4-14.el8.x86_64 70/106 + Verifying : libXcursor-1.1.15-3.el8.x86_64 71/106 + Verifying : libXdamage-1.1.4-14.el8.x86_64 72/106 + Verifying : libXext-1.3.4-1.el8.x86_64 73/106 + Verifying : libXfixes-5.0.3-7.el8.x86_64 74/106 + Verifying : libXft-2.3.3-1.el8.x86_64 75/106 + Verifying : libXi-1.7.10-1.el8.x86_64 76/106 + Verifying : libXinerama-1.1.4-1.el8.x86_64 77/106 + Verifying : libXrandr-1.5.2-1.el8.x86_64 78/106 + Verifying : libXrender-0.9.10-7.el8.x86_64 79/106 + Verifying : libXtst-1.2.3-7.el8.x86_64 80/106 + Verifying : libdatrie-0.2.9-7.el8.x86_64 81/106 + Verifying : libepoxy-1.5.8-1.el8.x86_64 82/106 + Verifying : libfontenc-1.1.3-8.el8.x86_64 83/106 + Verifying : libjpeg-turbo-1.5.3-12.el8.x86_64 84/106 + Verifying : libthai-0.1.27-2.el8.x86_64 85/106 + Verifying : libtiff-4.0.9-32.el8_10.x86_64 86/106 + Verifying : libwayland-client-1.21.0-1.el8.x86_64 87/106 + Verifying : libwayland-cursor-1.21.0-1.el8.x86_64 88/106 + Verifying : libwayland-egl-1.21.0-1.el8.x86_64 89/106 + Verifying : libxcb-1.13.1-1.el8.x86_64 90/106 + Verifying : libxkbcommon-0.9.1-1.el8.x86_64 91/106 + Verifying : lua-5.3.4-12.el8.x86_64 92/106 + Verifying : nspr-4.35.0-1.el8_8.x86_64 93/106 + Verifying : nss-3.90.0-7.el8_10.x86_64 94/106 + Verifying : nss-softokn-3.90.0-7.el8_10.x86_64 95/106 + Verifying : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 96/106 + Verifying : nss-sysinit-3.90.0-7.el8_10.x86_64 97/106 + Verifying : nss-util-3.90.0-7.el8_10.x86_64 98/106 + Verifying : pango-1.42.4-8.el8.x86_64 99/106 + Verifying : pixman-0.38.4-4.el8.x86_64 100/106 + Verifying : rest-0.8.1-2.el8.x86_64 101/106 + Verifying : ttmkfdir-3.0.9-54.el8.x86_64 102/106 + Verifying : tzdata-java-2024a-1.0.1.el8.noarch 103/106 + Verifying : 
xkeyboard-config-2.28-1.el8.noarch 104/106 + Verifying : xorg-x11-font-utils-1:7.5-41.el8.x86_64 105/106 + Verifying : xorg-x11-fonts-Type1-7.5-19.el8.noarch 106/106 + +Installed: + abattis-cantarell-fonts-0.0.25-6.el8.noarch + adwaita-cursor-theme-3.28.0-3.el8.noarch + adwaita-icon-theme-3.28.0-3.el8.noarch + alsa-lib-1.2.10-2.el8.x86_64 + at-spi2-atk-2.26.2-1.el8.x86_64 + at-spi2-core-2.28.0-1.el8.x86_64 + atk-2.28.1-1.el8.x86_64 + avahi-libs-0.7-27.el8.x86_64 + cairo-1.15.12-6.el8.x86_64 + cairo-gobject-1.15.12-6.el8.x86_64 + colord-libs-1.4.2-1.el8.x86_64 + copy-jdk-configs-4.0-2.el8.noarch + cpio-2.12-11.el8.x86_64 + crypto-policies-scripts-20230731-1.git3177e06.el8.noarch + cups-libs-1:2.2.6-60.el8_10.x86_64 + dconf-0.28.0-4.0.1.el8.x86_64 + dejavu-sans-mono-fonts-2.35-7.el8.noarch + dracut-049-233.git20240115.0.1.el8.x86_64 + file-5.33-25.el8.x86_64 + fribidi-1.0.4-9.el8.x86_64 + gdk-pixbuf2-2.36.12-6.el8_10.x86_64 + gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 + gettext-0.19.8.1-17.el8.x86_64 + gettext-libs-0.19.8.1-17.el8.x86_64 + glib-networking-2.56.1-1.1.el8.x86_64 + graphite2-1.3.10-10.el8.x86_64 + grub2-common-1:2.02-156.0.2.el8.noarch + grub2-tools-1:2.02-156.0.2.el8.x86_64 + grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 + grubby-8.40-49.0.2.el8.x86_64 + gsettings-desktop-schemas-3.32.0-6.el8.x86_64 + gtk-update-icon-cache-3.22.30-11.el8.x86_64 + gtk3-3.22.30-11.el8.x86_64 + hardlink-1:1.3-6.el8.x86_64 + harfbuzz-1.7.5-4.el8.x86_64 + hicolor-icon-theme-0.17-2.el8.noarch + jasper-libs-2.0.14-5.el8.x86_64 + java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86_64 + javapackages-filesystem-5.3.0-1.module+el8+5136+7ff78f74.noarch + jbigkit-libs-2.1-14.el8.x86_64 + json-glib-1.4.4-1.el8.x86_64 + kbd-2.0.4-11.el8.x86_64 + kbd-legacy-2.0.4-11.el8.noarch + kbd-misc-2.0.4-11.el8.noarch + lcms2-2.9-2.el8.x86_64 + libX11-1.6.8-8.el8.x86_64 + 
libX11-common-1.6.8-8.el8.noarch + libXau-1.0.9-3.el8.x86_64 + libXcomposite-0.4.4-14.el8.x86_64 + libXcursor-1.1.15-3.el8.x86_64 + libXdamage-1.1.4-14.el8.x86_64 + libXext-1.3.4-1.el8.x86_64 + libXfixes-5.0.3-7.el8.x86_64 + libXft-2.3.3-1.el8.x86_64 + libXi-1.7.10-1.el8.x86_64 + libXinerama-1.1.4-1.el8.x86_64 + libXrandr-1.5.2-1.el8.x86_64 + libXrender-0.9.10-7.el8.x86_64 + libXtst-1.2.3-7.el8.x86_64 + libcroco-0.6.12-4.el8_2.1.x86_64 + libdatrie-0.2.9-7.el8.x86_64 + libepoxy-1.5.8-1.el8.x86_64 + libfontenc-1.1.3-8.el8.x86_64 + libgomp-8.5.0-22.0.1.el8_10.x86_64 + libgusb-0.3.0-1.el8.x86_64 + libjpeg-turbo-1.5.3-12.el8.x86_64 + libkcapi-1.4.0-2.0.1.el8.x86_64 + libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 + libmodman-2.0.1-17.el8.x86_64 + libpkgconf-1.4.2-1.el8.x86_64 + libproxy-0.4.15-5.2.el8.x86_64 + libsoup-2.62.3-5.el8.x86_64 + libthai-0.1.27-2.el8.x86_64 + libtiff-4.0.9-32.el8_10.x86_64 + libwayland-client-1.21.0-1.el8.x86_64 + libwayland-cursor-1.21.0-1.el8.x86_64 + libwayland-egl-1.21.0-1.el8.x86_64 + libxcb-1.13.1-1.el8.x86_64 + libxkbcommon-0.9.1-1.el8.x86_64 + lksctp-tools-1.0.18-3.el8.x86_64 + lua-5.3.4-12.el8.x86_64 + memstrack-0.2.5-2.el8.x86_64 + nspr-4.35.0-1.el8_8.x86_64 + nss-3.90.0-7.el8_10.x86_64 + nss-softokn-3.90.0-7.el8_10.x86_64 + nss-softokn-freebl-3.90.0-7.el8_10.x86_64 + nss-sysinit-3.90.0-7.el8_10.x86_64 + nss-util-3.90.0-7.el8_10.x86_64 + os-prober-1.74-9.0.1.el8.x86_64 + pango-1.42.4-8.el8.x86_64 + pigz-2.4-4.el8.x86_64 + pixman-0.38.4-4.el8.x86_64 + pkgconf-1.4.2-1.el8.x86_64 + pkgconf-m4-1.4.2-1.el8.noarch + pkgconf-pkg-config-1.4.2-1.el8.x86_64 + rest-0.8.1-2.el8.x86_64 + shared-mime-info-1.9-4.el8.x86_64 + systemd-udev-239-78.0.4.el8.x86_64 + ttmkfdir-3.0.9-54.el8.x86_64 + tzdata-java-2024a-1.0.1.el8.noarch + xkeyboard-config-2.28-1.el8.noarch + xorg-x11-font-utils-1:7.5-41.el8.x86_64 + xorg-x11-fonts-Type1-7.5-19.el8.noarch + xz-5.2.4-4.el8_6.x86_64 + +Complete! 
+Last metadata expiration check: 0:00:23 ago on Tue 20 Aug 2024 08:55:14 AM UTC. +Package iproute-6.2.0-5.el8_9.x86_64 is already installed. +Dependencies resolved. +================================================================================ + Package Architecture Version Repository Size +================================================================================ +Upgrading: + iproute x86_64 6.2.0-6.el8_10 ol8_baseos_latest 853 k + +Transaction Summary +================================================================================ +Upgrade 1 Package + +Total download size: 853 k +Downloading Packages: +iproute-6.2.0-6.el8_10.x86_64.rpm 4.2 MB/s | 853 kB 00:00 +-------------------------------------------------------------------------------- +Total 4.2 MB/s | 853 kB 00:00 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. +Running transaction + Preparing : 1/1 + Upgrading : iproute-6.2.0-6.el8_10.x86_64 1/2 + Cleanup : iproute-6.2.0-5.el8_9.x86_64 2/2 + Running scriptlet: iproute-6.2.0-5.el8_9.x86_64 2/2 + Verifying : iproute-6.2.0-6.el8_10.x86_64 1/2 + Verifying : iproute-6.2.0-5.el8_9.x86_64 2/2 + +Upgraded: + iproute-6.2.0-6.el8_10.x86_64 + +Complete! +24 files removed +Removing intermediate container fe168b01f3ad + ---> 791878694a50 +Step 5/12 : RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 59d7143da358 + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 108M 100 108M 0 0 1440k 0 0:01:16 0:01:16 --:--:-- 1578k +Removing intermediate container 59d7143da358 + ---> 17c4534293e5 +Step 6/12 : RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 84b1cbffdc51 +Verifying... ######################################## +Preparing... ######################################## +Updating / installing... 
+ords-23.4.0-8.el8 ######################################## +INFO: Before starting ORDS service, run the below command as user oracle: + ords --config /etc/ords/config install +Removing intermediate container 84b1cbffdc51 + ---> 6e7151b79588 +Step 7/12 : RUN mkdir -p $ORDS_HOME/doc_root && mkdir -p $ORDS_HOME/error && mkdir -p $ORDS_HOME/secrets && chmod ug+x $ORDS_HOME/*.sh && groupadd -g 54322 dba && usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && chown -R oracle:dba $ORDS_HOME && echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + ---> Running in 66e5db5f343f +Removing intermediate container 66e5db5f343f + ---> 0523dc897bf4 +Step 8/12 : USER oracle + ---> Running in ffda8495ac77 +Removing intermediate container ffda8495ac77 + ---> 162acd4d0b93 +Step 9/12 : WORKDIR /home/oracle + ---> Running in 8c14310ffbc7 +Removing intermediate container 8c14310ffbc7 + ---> c8dae809e772 +Step 10/12 : VOLUME ["$ORDS_HOME/config/ords"] + ---> Running in ed64548fd997 +Removing intermediate container ed64548fd997 + ---> 22e2c99247b0 +Step 11/12 : EXPOSE 8888 + ---> Running in 921f7c85d61d +Removing intermediate container 921f7c85d61d + ---> e5d503c92224 +Step 12/12 : CMD $ORDS_HOME/$RUN_FILE + ---> Running in cad487298d63 +Removing intermediate container cad487298d63 + ---> fdb17aa242f8 +Successfully built fdb17aa242f8 +Successfully tagged oracle/ords-dboper:latest +08:57:18 oracle@mitk01:# + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/ImagePush.log b/docs/multitenant/ords-based/usecase01/logfiles/ImagePush.log new file mode 100644 index 00000000..9b8df426 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/ImagePush.log @@ -0,0 +1,11 @@ +/usr/bin/docker tag oracle/ords-dboper:latest /ords-dboper:latest +/usr/bin/docker push /ords-dboper:latest +The push refers to repository [/ords-dboper] +aef18205865c: Pushing [=============================> ] 56.55MB/95.45MB +2564d855e579: Pushing [=======> ] 57.08MB/357.6MB +a70a4f9a73c3: 
Pushed +f283c83ba6ac: Pushed +8c6709989678: Pushing [=======> ] 52.58MB/332.7MB +5bfd57d8f58a: Pushing [========> ] 37.47MB/229.2MB + + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/cdb.log b/docs/multitenant/ords-based/usecase01/logfiles/cdb.log new file mode 100644 index 00000000..c75e9bf8 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/cdb.log @@ -0,0 +1,372 @@ +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M +NOT_INSTALLED=2 + SETUP +==================================================== +CONFIG=/etc/ords/config ++ export ORDS_LOGS=/tmp ++ ORDS_LOGS=/tmp ++ '[' -f /opt/oracle/ords//secrets/webserver_user ']' +++ cat /opt/oracle/ords//secrets/webserver_user ++ WEBSERVER_USER=.... ++ '[' -f /opt/oracle/ords//secrets/webserver_pwd ']' +++ cat /opt/oracle/ords//secrets/webserver_pwd ++ WEBSERVER_PASSWORD=.... ++ '[' -f /opt/oracle/ords//secrets/cdbadmin_user ']' +++ cat /opt/oracle/ords//secrets/cdbadmin_user ++ CDBADMIN_USER=.... ++ '[' -f /opt/oracle/ords//secrets/cdbadmin_pwd ']' +++ cat /opt/oracle/ords//secrets/cdbadmin_pwd ++ CDBADMIN_PWD=.... ++ '[' -f /opt/oracle/ords//secrets/sysadmin_pwd ']' +++ cat /opt/oracle/ords//secrets/sysadmin_pwd ++ SYSDBA_PASSWORD=..... ++ '[' -f /opt/oracle/ords//secrets/sysadmin_pwd ']' +++ cat /opt/oracle/ords//secrets/ords_pwd ++ ORDS_PASSWORD=.... ++ setupHTTPS ++ rm -rf /home/oracle/keystore ++ '[' '!' -d /home/oracle/keystore ']' ++ mkdir /home/oracle/keystore ++ cd /home/oracle/keystore ++ cat ++ rm /home/oracle/keystore/PASSWORD ++ ls -ltr /home/oracle/keystore +total 0 ++ SetParameter ++ /usr/local/bin/ords --config /etc/ords/config config set security.requestValidationFunction false +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:22 2022 + +Copyright (c) 2010, 2022, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The setting named: security.requestValidationFunction was set to: false in configuration: default ++ /usr/local/bin/ords --config /etc/ords/config config set jdbc.MaxLimit 100 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:23 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.MaxLimit was set to: 100 in configuration: default ++ /usr/local/bin/ords --config /etc/ords/config config set jdbc.InitialLimit 50 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:24 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.InitialLimit was set to: 50 in configuration: default ++ /usr/local/bin/ords --config /etc/ords/config config set error.externalPath /opt/oracle/ords/error +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:26 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: error.externalPath was set to: /opt/oracle/ords/error ++ /usr/local/bin/ords --config /etc/ords/config config set standalone.access.log /home/oracle +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:27 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.access.log was set to: /home/oracle ++ /usr/local/bin/ords --config /etc/ords/config config set standalone.https.port 8888 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:28 2022 + +Copyright (c) 2010, 2022, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.port was set to: 8888 ++ /usr/local/bin/ords --config /etc/ords/config config set standalone.https.cert /opt/oracle/ords//secrets/tls.crt +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:29 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert was set to: /opt/oracle/ords//secrets/tls.crt ++ /usr/local/bin/ords --config /etc/ords/config config set standalone.https.cert.key /opt/oracle/ords//secrets/tls.key +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:31 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert.key was set to: /opt/oracle/ords//secrets/tls.key ++ /usr/local/bin/ords --config /etc/ords/config config set restEnabledSql.active true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:32 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: restEnabledSql.active was set to: true in configuration: default ++ /usr/local/bin/ords --config /etc/ords/config config set security.verifySSL true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:33 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: security.verifySSL was set to: true ++ /usr/local/bin/ords --config /etc/ords/config config set database.api.enabled true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:34 2022 + +Copyright (c) 2010, 2022, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: database.api.enabled was set to: true ++ /usr/local/bin/ords --config /etc/ords/config config set plsql.gateway.mode false +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:35 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +Invalid VALUE argument false for KEY plsql.gateway.mode. ++ /usr/local/bin/ords --config /etc/ords/config config set database.api.management.services.disabled false +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:37 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: database.api.management.services.disabled was set to: false ++ /usr/local/bin/ords --config /etc/ords/config config set misc.pagination.maxRows 1000 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:38 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: misc.pagination.maxRows was set to: 1000 in configuration: default ++ /usr/local/bin/ords --config /etc/ords/config config set db.cdb.adminUser 'C##DBAPI_CDB_ADMIN AS SYSDBA' +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:39 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser was set to: C##DBAPI_CDB_ADMIN AS SYSDBA in configuration: default ++ /usr/local/bin/ords --config /etc/ords/config config secret --password-stdin db.cdb.adminUser.password +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:40 2022 + +Copyright (c) 2010, 2022, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser.password was set to: ****** in configuration: default ++ /usr/local/bin/ords --config /etc/ords/config config user add --password-stdin sql_admin 'SQL Administrator, System Administrator' +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:42 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +Created user sql_admin in file /etc/ords/config/global/credentials ++ /usr/local/bin/ords --config /etc/ords/config install --admin-user 'SYS AS SYSDBA' --db-hostname racnode1.testrac.com --db-port 1521 --db-servicename TESTORDS --feature-db-api true --feature-rest-enabled-sql true --log-folder /tmp --proxy-user --password-stdin +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:45:43 2022 + +Copyright (c) 2010, 2022, Oracle. + +Configuration: + /etc/ords/config/ + +Oracle REST Data Services - Non-Interactive Install +Connecting to database user: SYS AS SYSDBA url: jdbc:oracle:thin:@//racnode1.testrac.com:1521/TESTORDS + +Retrieving information.. +Your database connection is to a CDB. ORDS common user ORDS_PUBLIC_USER will be created in the CDB. ORDS schema will be installed in the PDBs. 
+Root CDB$ROOT - create ORDS common user +PDB PDB$SEED - install ORDS 22.3.0.r2781755 +PDB PDB$SEED - configure PL/SQL gateway user APEX_PUBLIC_USER in ORDS version 22.3.0.r2781755 + +The setting named: db.connectionType was set to: basic in configuration: default +The setting named: db.hostname was set to: racnode1.testrac.com in configuration: default +The setting named: db.port was set to: 1521 in configuration: default +The setting named: db.servicename was set to: TESTORDS in configuration: default +The setting named: db.serviceNameSuffix was set to: in configuration: default +The setting named: plsql.gateway.mode was set to: proxied in configuration: default +The setting named: db.username was set to: ORDS_PUBLIC_USER in configuration: default +The setting named: db.password was set to: ****** in configuration: default +The setting named: security.requestValidationFunction was set to: wwv_flow_epg_include_modules.authorize in configuration: default +2022-10-11T07:45:45.885Z INFO Installing Oracle REST Data Services version 22.3.0.r2781755 in CDB$ROOT +2022-10-11T07:45:46.703Z INFO ... Verified database prerequisites +2022-10-11T07:45:46.946Z INFO ... Created Oracle REST Data Services proxy user +2022-10-11T07:45:46.979Z INFO Completed installation for Oracle REST Data Services version 22.3.0.r2781755. Elapsed time: 00:00:01.71 + +2022-10-11T07:45:46.986Z INFO Installing Oracle REST Data Services version 22.3.0.r2781755 in PDB$SEED +2022-10-11T07:45:47.078Z INFO ... Verified database prerequisites +2022-10-11T07:45:47.290Z INFO ... Created Oracle REST Data Services proxy user +2022-10-11T07:45:47.741Z INFO ... Created Oracle REST Data Services schema +2022-10-11T07:45:48.097Z INFO ... Granted privileges to Oracle REST Data Services +2022-10-11T07:45:51.848Z INFO ... Created Oracle REST Data Services database objects +2022-10-11T07:46:00.829Z INFO Completed installation for Oracle REST Data Services version 22.3.0.r2781755. 
Elapsed time: 00:00:13.841 + +2022-10-11T07:46:00.898Z INFO Completed configuring PL/SQL gateway user for Oracle REST Data Services version 22.3.0.r2781755. Elapsed time: 00:00:00.68 + +2022-10-11T07:46:00.898Z INFO Completed CDB installation for Oracle REST Data Services version 22.3.0.r2781755. Total elapsed time: 00:00:15.17 + +2022-10-11T07:46:00.898Z INFO Log file written to /tmp/ords_cdb_install_2022-10-11_074545_78000.log +2022-10-11T07:46:00.901Z INFO To run in standalone mode, use the ords serve command: +2022-10-11T07:46:00.901Z INFO ords --config /etc/ords/config serve +2022-10-11T07:46:00.901Z INFO Visit the ORDS Documentation to access tutorials, developer guides and more to help you get started with the new ORDS Command Line Interface (http://oracle.com/rest). ++ '[' 0 -ne 0 ']' ++ StartUp ++ /usr/local/bin/ords --config /etc/ords/config serve --port 8888 --secure +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 22.3 Production on Tue Oct 11 07:46:02 2022 + +Copyright (c) 2010, 2022, Oracle. 
+ +Configuration: + /etc/ords/config/ + +2022-10-11T07:46:02.286Z INFO HTTPS and HTTPS/2 listening on host: 0.0.0.0 port: 8888 +2022-10-11T07:46:02.302Z INFO Disabling document root because the specified folder does not exist: /etc/ords/config/global/doc_root +2022-10-11T07:46:04.636Z INFO Configuration properties for: |default|lo| +db.servicename=TESTORDS +db.serviceNameSuffix= +java.specification.version=19 +conf.use.wallet=true +database.api.management.services.disabled=false +sun.jnu.encoding=UTF-8 +user.region=US +java.class.path=/opt/oracle/ords/ords.war +java.vm.vendor=Oracle Corporation +standalone.https.cert.key=/opt/oracle/ords//secrets/tls.key +sun.arch.data.model=64 +nashorn.args=--no-deprecation-warning +java.vendor.url=https://java.oracle.com/ +resource.templates.enabled=false +user.timezone=UTC +db.port=1521 +java.vm.specification.version=19 +os.name=Linux +sun.java.launcher=SUN_STANDARD +user.country=US +sun.boot.library.path=/usr/java/jdk-19/lib +sun.java.command=/opt/oracle/ords/ords.war --config /etc/ords/config serve --port 8888 --secure +jdk.debug=release +sun.cpu.endian=little +user.home=/home/oracle +oracle.dbtools.launcher.executable.jar.path=/opt/oracle/ords/ords.war +user.language=en +db.cdb.adminUser.password=****** +java.specification.vendor=Oracle Corporation +java.version.date=2022-09-20 +database.api.enabled=true +java.home=/usr/java/jdk-19 +db.username=ORDS_PUBLIC_USER +file.separator=/ +java.vm.compressedOopsMode=32-bit +line.separator= + +restEnabledSql.active=true +java.specification.name=Java Platform API Specification +java.vm.specification.vendor=Oracle Corporation +java.awt.headless=true +standalone.https.cert=/opt/oracle/ords//secrets/tls.crt +db.hostname=racnode1.testrac.com +db.password=****** +sun.management.compiler=HotSpot 64-Bit Tiered Compilers +security.requestValidationFunction=wwv_flow_epg_include_modules.authorize +misc.pagination.maxRows=1000 +java.runtime.version=19+36-2238 +user.name=oracle 
+error.externalPath=/opt/oracle/ords/error +stdout.encoding=UTF-8 +path.separator=: +db.cdb.adminUser=C##DBAPI_CDB_ADMIN AS SYSDBA +os.version=5.4.17-2136.308.9.el7uek.x86_64 +java.runtime.name=Java(TM) SE Runtime Environment +file.encoding=UTF-8 +plsql.gateway.mode=proxied +security.verifySSL=true +standalone.https.port=8888 +java.vm.name=Java HotSpot(TM) 64-Bit Server VM +java.vendor.url.bug=https://bugreport.java.com/bugreport/ +java.io.tmpdir=/tmp +oracle.dbtools.cmdline.ShellCommand=ords +java.version=19 +user.dir=/home/oracle/keystore +os.arch=amd64 +java.vm.specification.name=Java Virtual Machine Specification +jdbc.MaxLimit=100 +oracle.dbtools.cmdline.home=/opt/oracle/ords +native.encoding=UTF-8 +java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib +java.vendor=Oracle Corporation +java.vm.info=mixed mode, sharing +stderr.encoding=UTF-8 +java.vm.version=19+36-2238 +sun.io.unicode.encoding=UnicodeLittle +jdbc.InitialLimit=50 +db.connectionType=basic +java.class.version=63.0 +standalone.access.log=/home/oracle + +2022-10-11T07:46:06.669Z INFO Oracle REST Data Services initialized +Oracle REST Data Services version : 22.3.0.r2781755 +Oracle REST Data Services server info: jetty/10.0.11 +Oracle REST Data Services java info: Java HotSpot(TM) 64-Bit Server VM 19+36-2238 + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/cdb_creation.log b/docs/multitenant/ords-based/usecase01/logfiles/cdb_creation.log new file mode 100644 index 00000000..b4602f54 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/cdb_creation.log @@ -0,0 +1,357 @@ +/usr/local/go/bin/kubectl logs -f `/usr/local/go/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ORDSVERSIN:23.4.0-8 +NOT_INSTALLED=2 + SETUP +==================================================== +CONFIG=/etc/ords/config +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:16 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.connectionType was set to: customurl in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:18 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.customURL was set to: jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:20 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: security.requestValidationFunction was set to: false in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:22 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.MaxLimit was set to: 100 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:24 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.InitialLimit was set to: 50 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:25 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: error.externalPath was set to: /opt/oracle/ords/error +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:27 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.access.log was set to: /home/oracle +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:29 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.port was set to: 8888 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:31 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert was set to: /opt/oracle/ords//secrets/tls.crt +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:33 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert.key was set to: /opt/oracle/ords//secrets/tls.key +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:35 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: restEnabledSql.active was set to: true in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:37 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: security.verifySSL was set to: true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:39 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: database.api.enabled was set to: true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:41 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: plsql.gateway.mode was set to: disabled in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:43 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: database.api.management.services.disabled was set to: false +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:45 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: misc.pagination.maxRows was set to: 1000 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:47 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser was set to: C##DBAPI_CDB_ADMIN AS SYSDBA in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:49 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser.password was set to: ****** in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:51 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +Created user welcome in file /etc/ords/config/global/credentials +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:53 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +Oracle REST Data Services - Non-Interactive Install + +Retrieving information... 
+Completed verifying Oracle REST Data Services schema version 23.4.0.r3461619. +Connecting to database user: ORDS_PUBLIC_USER url: jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +The setting named: db.serviceNameSuffix was set to: in configuration: default +The setting named: db.username was set to: ORDS_PUBLIC_USER in configuration: default +The setting named: db.password was set to: ****** in configuration: default +The setting named: security.requestValidationFunction was set to: ords_util.authorize_plsql_gateway in configuration: default +2024-08-20T07:21:57.563Z INFO Oracle REST Data Services schema version 23.4.0.r3461619 is installed. +2024-08-20T07:21:57.565Z INFO To run in standalone mode, use the ords serve command: +2024-08-20T07:21:57.565Z INFO ords --config /etc/ords/config serve +2024-08-20T07:21:57.565Z INFO Visit the ORDS Documentation to access tutorials, developer guides and more to help you get started with the new ORDS Command Line Interface (http://oracle.com/rest). +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:59 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +2024-08-20T07:21:59.739Z INFO HTTP and HTTP/2 cleartext listening on host: 0.0.0.0 port: 8080 +2024-08-20T07:21:59.741Z INFO HTTPS and HTTPS/2 listening on host: 0.0.0.0 port: 8888 +2024-08-20T07:21:59.765Z INFO Disabling document root because the specified folder does not exist: /etc/ords/config/global/doc_root +2024-08-20T07:21:59.765Z INFO Default forwarding from / to contextRoot configured. 
+2024-08-20T07:22:05.313Z INFO Configuration properties for: |default|lo| +db.serviceNameSuffix= +java.specification.version=22 +conf.use.wallet=true +database.api.management.services.disabled=false +sun.jnu.encoding=UTF-8 +user.region=US +java.class.path=/opt/oracle/ords/ords.war +java.vm.vendor=Oracle Corporation +standalone.https.cert.key=/opt/oracle/ords//secrets/tls.key +sun.arch.data.model=64 +nashorn.args=--no-deprecation-warning +java.vendor.url=https://java.oracle.com/ +resource.templates.enabled=false +user.timezone=UTC +java.vm.specification.version=22 +os.name=Linux +sun.java.launcher=SUN_STANDARD +user.country=US +sun.boot.library.path=/usr/java/jdk-22/lib +sun.java.command=/opt/oracle/ords/ords.war --config /etc/ords/config serve --port 8888 --secure +jdk.debug=release +sun.cpu.endian=little +user.home=/home/oracle +oracle.dbtools.launcher.executable.jar.path=/opt/oracle/ords/ords.war +user.language=en +db.cdb.adminUser.password=****** +java.specification.vendor=Oracle Corporation +java.version.date=2024-07-16 +database.api.enabled=true +java.home=/usr/java/jdk-22 +db.username=ORDS_PUBLIC_USER +file.separator=/ +java.vm.compressedOopsMode=32-bit +line.separator= + +restEnabledSql.active=true +java.specification.name=Java Platform API Specification +java.vm.specification.vendor=Oracle Corporation +java.awt.headless=true +standalone.https.cert=/opt/oracle/ords//secrets/tls.crt +db.password=****** +sun.management.compiler=HotSpot 64-Bit Tiered Compilers +security.requestValidationFunction=ords_util.authorize_plsql_gateway +misc.pagination.maxRows=1000 +java.runtime.version=22.0.2+9-70 +user.name=oracle +error.externalPath=/opt/oracle/ords/error +stdout.encoding=UTF-8 +path.separator=: +db.cdb.adminUser=C##DBAPI_CDB_ADMIN AS SYSDBA +os.version=5.4.17-2136.329.3.1.el7uek.x86_64 +java.runtime.name=Java(TM) SE Runtime Environment +file.encoding=UTF-8 +plsql.gateway.mode=disabled +security.verifySSL=true +standalone.https.port=8888 +java.vm.name=Java 
HotSpot(TM) 64-Bit Server VM +java.vendor.url.bug=https://bugreport.java.com/bugreport/ +java.io.tmpdir=/tmp +oracle.dbtools.cmdline.ShellCommand=ords +java.version=22.0.2 +user.dir=/home/oracle +os.arch=amd64 +java.vm.specification.name=Java Virtual Machine Specification +jdbc.MaxLimit=100 +oracle.dbtools.cmdline.home=/opt/oracle/ords +native.encoding=UTF-8 +java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib +java.vendor=Oracle Corporation +java.vm.info=mixed mode, sharing +stderr.encoding=UTF-8 +java.vm.version=22.0.2+9-70 +sun.io.unicode.encoding=UnicodeLittle +jdbc.InitialLimit=50 +db.connectionType=customurl +java.class.version=66.0 +db.customURL=jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +standalone.access.log=/home/oracle + +2024-08-20T07:22:09.268Z INFO + +Mapped local pools from /etc/ords/config/databases: + /ords/ => default => VALID + + +2024-08-20T07:22:09.414Z INFO Oracle REST Data Services initialized +Oracle REST Data Services version : 23.4.0.r3461619 +Oracle REST Data Services server info: jetty/10.0.18 +Oracle REST Data Services java info: Java HotSpot(TM) 64-Bit Server VM 22.0.2+9-70 + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/openssl_execution.log b/docs/multitenant/ords-based/usecase01/logfiles/openssl_execution.log new file mode 100644 index 00000000..e3915a21 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/openssl_execution.log @@ -0,0 +1,19 @@ +CREATING TLS CERTIFICATES +/usr/bin/openssl genrsa -out ca.key 2048 +Generating RSA private key, 2048 bit long modulus (2 primes) +......................+++++ +..................................................+++++ +e is 65537 (0x010001) 
+/usr/bin/openssl req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost Root CA " -out ca.crt +/usr/bin/openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost" -out server.csr +Generating a RSA private key +...........+++++ +...........................................+++++ +writing new private key to 'tls.key' +----- +/usr/bin/echo "subjectAltName=DNS:cdb-dev-ords.oracle-database-operator-system,DNS:www.example.com" > extfile.txt +/usr/bin/openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt +Signature ok +subject=C = US, ST = California, L = SanFrancisco, O = "oracle ", CN = "cdb-dev-ords.oracle-database-operator-system ", CN = localhost +Getting CA Private Key + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/ordsconfig.log b/docs/multitenant/ords-based/usecase01/logfiles/ordsconfig.log new file mode 100644 index 00000000..b787b752 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/ordsconfig.log @@ -0,0 +1,39 @@ +ORDS: Release 23.4 Production on Tue Aug 20 07:48:44 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +Database pool: default + +Setting Value Source +----------------------------------------- -------------------------------------------------- ----------- +database.api.enabled true Global +database.api.management.services.disabled false Global +db.cdb.adminUser C##DBAPI_CDB_ADMIN AS SYSDBA Pool +db.cdb.adminUser.password ****** Pool Wallet +db.connectionType customurl Pool +db.customURL jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90 Pool + )(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNEC + T_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL= + TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONL + Y))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST= + scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNEC + T_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +db.password ****** Pool Wallet +db.serviceNameSuffix Pool +db.username ORDS_PUBLIC_USER Pool +error.externalPath /opt/oracle/ords/error Global +jdbc.InitialLimit 50 Pool +jdbc.MaxLimit 100 Pool +misc.pagination.maxRows 1000 Pool +plsql.gateway.mode disabled Pool +restEnabledSql.active true Pool +security.requestValidationFunction ords_util.authorize_plsql_gateway Pool +security.verifySSL true Global +standalone.access.log /home/oracle Global +standalone.https.cert /opt/oracle/ords//secrets/tls.crt Global +standalone.https.cert.key /opt/oracle/ords//secrets/tls.key Global +standalone.https.port 8888 Global + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/tagandpush.log b/docs/multitenant/ords-based/usecase01/logfiles/tagandpush.log new file mode 100644 index 00000000..232d5bb2 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/tagandpush.log @@ -0,0 +1,14 @@ +/usr/bin/docker tag oracle/ords-dboper:latest [.......]/ords-dboper:latest + +/usr/bin/docker push [your container registry]/ords-dboper:latest +The push refers to repository [your container registry] +0405aac3af1c: Pushed +6be46e8e1e21: Pushed +c9884830a66d: Pushed +a46244557bb9: Pushing 
[===========================> ] 261.8MB/469.9MB +f988845e261e: Pushed +fe07ec0b1f5a: Layer already exists +2ac63de5f950: Layer already exists +386cd7a64c01: Layer already exists +826c69252b8b: Layer already exists + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/testapi.log b/docs/multitenant/ords-based/usecase01/logfiles/testapi.log new file mode 100644 index 00000000..cb42ecc3 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/testapi.log @@ -0,0 +1,62 @@ +kubectl exec -it `kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system -i -t -- /usr/bin/curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ +* Trying ::1... +* TCP_NODELAY set +* Connected to localhost (::1) port 8888 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/pki/tls/certs/ca-bundle.crt + CApath: none +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, [no content] (0): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, [no content] (0): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=US; ST=California; L=SanFrancisco; O=oracle ; CN=cdb-dev-ords.oracle-database-operator-system ; CN=localhost +* start date: Aug 20 07:14:04 2024 GMT +* expire date: Aug 20 07:14:04 2025 GMT +* issuer: C=US; ST=California; L=SanFrancisco; O=oracle ; CN=cdb-dev-ords.oracle-database-operator-system ; CN=localhost Root CA +* SSL certificate verify result: unable to get local issuer 
certificate (20), continuing anyway. +* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* TLSv1.3 (OUT), TLS app data, [no content] (0): +* TLSv1.3 (OUT), TLS app data, [no content] (0): +* TLSv1.3 (OUT), TLS app data, [no content] (0): +* Using Stream ID: 1 (easy handle 0x55d14a7dea90) +* TLSv1.3 (OUT), TLS app data, [no content] (0): +> GET /ords/_/db-api/stable/metadata-catalog/ HTTP/2 +> Host: localhost:8888 +> User-Agent: curl/7.61.1 +> Accept: */* +> +* TLSv1.3 (IN), TLS handshake, [no content] (0): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, [no content] (0): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* Connection state changed (MAX_CONCURRENT_STREAMS == 128)! +* TLSv1.3 (OUT), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +< HTTP/2 200 +< content-type: application/json +< +* TLSv1.3 (IN), TLS handshake, [no content] (0): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* Connection #0 to host localhost left intact +{"items":[{"name":"default","links":[{"rel":"canonical","href":"https://localhost:8888/ords/_/db-api/stable/metadata-catalog/openapi.json","mediaType":"application/vnd.oai.openapi+json;version=3.0"}]}],"links":[{"rel":"self","href":"https://localhost:8888/ords/_/db-api/stable/metadata-catalog/"},{"rel":"describes","href":"https://localhost:8888/ords/_/db-api/stable/"}]} diff --git a/docs/multitenant/ords-based/usecase01/makefile 
b/docs/multitenant/ords-based/usecase01/makefile new file mode 100644 index 00000000..ec454e28 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/makefile @@ -0,0 +1,906 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# | | | | ___| |_ __ ___ _ __ +# | |_| |/ _ \ | '_ \ / _ \ '__| +# | _ | __/ | |_) | __/ | +# |_| |_|\___|_| .__/ \___|_| +# |_| +# +# WARNING: Using this makefile helps you to customize yaml +# files. Edit parameters.txt with your environment +# information and execute the following steps +# +# 1) make operator +# it configures the operator yaml files with the +# watch namelist required by the multitenant controllers +# +# 2) make genyaml +# It automatically creates all the yaml files based on the +# information available in the parameters file +# +# 3) make secrets +# It configures the required secrets necessary to operate +# with pdbs multitenant controllers +# +# 4) make runall01 +# Start a series of operations create open close delete and so on +# +# LIST OF GENERATED YAML FILES +# +# ----------------------------- ---------------------------------- +# oracle-database-operator.yaml : oracle database operator +# cdbnamespace_binding.yaml : role binding for cdbnamespace +# pdbnamespace_binding.yaml : role binding for pdbnamespace +# create_cdb_secret.yaml : create secrets for ords server pod +# create_pdb_secret.yaml : create secrets for pluggable database +# create_ords_pod.yaml : create rest server pod +# create_pdb1_resource.yaml : create first pluggable database +# create_pdb2_resource.yaml : create second pluggable database +# open_pdb1_resource.yaml : open first pluggable database +# open_pdb2_resource.yaml : open second pluggable database +# 
close_pdb1_resource.yaml : close first pluggable database +# close_pdb2_resource.yaml : close second pluggable database +# clone_pdb_resource.yaml : clone third pluggable database +# clone_pdb2_resource.yaml : clone 4th pluggable database +# delete_pdb1_resource.yaml : delete first pluggable database +# delete_pdb2_resource.yaml : delete second pluggable database +# delete_pdb3_resource.yaml : delete third pluggable database +# unplug_pdb1_resource.yaml : unplug first pluggable database +# plug_pdb1_resource.yaml : plug first pluggable database +# map_pdb1_resource.yaml : map the first pluggable database +# config_map.yaml : pdb parameters array +# +DATE := `date "+%y%m%d%H%M%S"` +###################### +# PARAMETER SECTIONS # +###################### + +export PARAMETERS=parameters.txt +export TNSALIAS=$(shell cat $(PARAMETERS) |grep -v ^\#|grep TNSALIAS|cut -d : -f 2) +export ORDPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep ORDPWD|cut -d : -f 2) +export SYSPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep SYSPWD|cut -d : -f 2) +export WBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBUSER|cut -d : -f 2) +export WBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBPASS|cut -d : -f 2) +export PDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBUSR|cut -d : -f 2) +export PDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBPWD|cut -d : -f 2) +export CDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBUSR|cut -d : -f 2) +export CDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBPWD|cut -d : -f 2) +export OPRNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep OPRNAMESPACE|cut -d : -f 2) +export OPRNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep OPRNAMESPACE|cut -d : -f 2) +export ORDSIMG=$(shell cat $(PARAMETERS)|grep -v ^\#|grep ORDSIMG|cut -d : -f 2,3) +export COMPANY=$(shell cat $(PARAMETERS)|grep -v ^\#|grep COMPANY|cut -d : -f 2) +export APIVERSION=$(shell cat $(PARAMETERS)|grep -v ^\#|grep APIVERSION|cut -d : -f 2) +export
OPRNAMESPACE=oracle-database-operator-system +export ORACLE_OPERATOR_YAML=../../../../oracle-database-operator.yaml +export TEST_EXEC_TIMEOUT=3m +export IMAGE=oracle/ords-dboper:latest +export ORDSIMGDIR=../../../../ords + +REST_SERVER=ords +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +PRVKEY=ca.key +PUBKEY=public.pem +COMPANY=oracle +RUNTIME=/usr/bin/podman + +################# +### FILE LIST ### +################# + +export ORDS_POD=create_ords_pod.yaml + +export CDB_SECRETS=create_cdb_secrets.yaml +export PDB_SECRETS=create_pdb_secrets.yaml + +export PDBCRE1=create_pdb1_resource.yaml +export PDBCRE2=create_pdb2_resource.yaml + +export PDBCLOSE1=close_pdb1_resource.yaml +export PDBCLOSE2=close_pdb2_resource.yaml +export PDBCLOSE3=close_pdb3_resource.yaml + +export PDBOPEN1=open_pdb1_resource.yaml +export PDBOPEN2=open_pdb2_resource.yaml +export PDBOPEN3=open_pdb3_resource.yaml + +export PDBCLONE1=clone_pdb1_resource.yaml +export PDBCLONE2=clone_pdb2_resource.yaml + +export PDBDELETE1=delete_pdb1_resource.yaml +export PDBDELETE2=delete_pdb2_resource.yaml +export PDBDELETE3=delete_pdb3_resource.yaml + +export PDBUNPLUG1=unplug_pdb1_resource.yaml +export PDBPLUG1=plug_pdb1_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + + +##BINARIES +export KUBECTL=/usr/bin/kubectl +OPENSSL=/usr/bin/openssl +ECHO=/usr/bin/echo +RM=/usr/bin/rm +CP=/usr/bin/cp +TAR=/usr/bin/tar +MKDIR=/usr/bin/mkdir +SED=/usr/bin/sed + +define msg +@printf "\033[31;7m%s\033[0m\r" "......................................]" +@printf "\033[31;7m[\xF0\x9F\x91\x89 %s\033[0m\n" $(1) +endef + +check: + $(call msg,"CHECK PARAMETERS") + @printf "TNSALIAS...............:%.60s....\n" $(TNSALIAS) + @printf "ORDPWD.................:%s\n" $(ORDPWD) + @printf "SYSPWD.................:%s\n" $(SYSPWD) + @printf 
"WBUSER.................:%s\n" $(WBUSER) + @printf "WBPASS.................:%s\n" $(WBPASS) + @printf "PDBUSR.................:%s\n" $(PDBUSR) + @printf "PDBPWD.................:%s\n" $(PDBPWD) + @printf "CDBUSR.................:%s\n" $(CDBUSR) + @printf "CDBPWD.................:%s\n" $(CDBPWD) + @printf "OPRNAMESPACE...........:%s\n" $(OPRNAMESPACE) + @printf "COMPANY................:%s\n" $(COMPANY) + @printf "APIVERSION.............:%s\n" $(APIVERSION) + + +tlscrt: + $(call msg,"TLS GENERATION") + #$(OPENSSL) genrsa -out $(PRVKEY) 2048 + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > $(PRVKEY) + $(OPENSSL) req -new -x509 -days 365 -key $(PRVKEY) \ + -subj "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=$(COMPANY) Root CA" -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj \ + "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=cdb-dev-$(REST_SERVER).$(OPRNAMESPACE)" -out server.csr + $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER).$(OPRNAMESPACE)" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey $(PRVKEY) -CAcreateserial -out $(SCRT) + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + +tlssec: + $(call msg,"GENERATE TLS SECRET") + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(OPRNAMESPACE) + + +delsec: + $(call msg,"CLEAN OLD SECRETS") + $(eval SECRETSP:=$(shell kubectl get secrets -n $(OPRNAMESPACE) -o custom-columns=":metadata.name" --no-headers|grep -v webhook-server-cert) ) + @[ "${SECRETSP}" ] && ( \ + printf "Deleteing secrets in namespace -n $(OPRNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSP) -n $(OPRNAMESPACE))\ + || ( echo "No screts in namespace $(OPRNAMESPACE)") + + +###### ENCRYPTED SECRETS ###### +export PRVKEY=ca.key +export PUBKEY=public.pem +WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt 
+CDBUSRFILE=cdbusr.txt +CDBPWDFILE=cdbpwd.txt +SYSPWDFILE=syspwd.txt +ORDPWDFILE=ordpwd.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + + + +secrets: delsec tlscrt tlssec + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(OPRNAMESPACE) + @$(ECHO) $(WBUSER) > $(WBUSERFILE) + @$(ECHO) $(WBPASS) > $(WBPASSFILE) + @$(ECHO) $(CDBPWD) > $(CDBPWDFILE) + @$(ECHO) $(CDBUSR) > $(CDBUSRFILE) + @$(ECHO) $(SYSPWD) > $(SYSPWDFILE) + @$(ECHO) $(ORDPWD) > $(ORDPWDFILE) + @$(ECHO) $(PDBUSR) > $(PDBUSRFILE) + @$(ECHO) $(PDBPWD) > $(PDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBUSERFILE) |base64 > e_$(WBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBPASSFILE) |base64 > e_$(WBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBPWDFILE) |base64 > e_$(CDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBUSRFILE) |base64 > e_$(CDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE) |base64 > e_$(SYSPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(ORDPWDFILE) |base64 > e_$(ORDPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBUSRFILE) |base64 > e_$(PDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBPWDFILE) |base64 > e_$(PDBPWDFILE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic cdbpwd --from-file=e_$(CDBPWDFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic cdbusr --from-file=e_$(CDBUSRFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic syspwd --from-file=e_$(SYSPWDFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic ordpwd 
--from-file=e_$(ORDPWDFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic pdbusr --from-file=e_$(PDBUSRFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic pdbpwd --from-file=e_$(PDBPWDFILE) -n $(OPRNAMESPACE) + $(RM) $(WBUSERFILE) $(WBPASSFILE) $(CDBPWDFILE) $(CDBUSRFILE) $(SYSPWDFILE) $(ORDPWDFILE) $(PDBUSRFILE) $(PDBPWDFILE) + $(RM) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(CDBPWDFILE) e_$(CDBUSRFILE) e_$(SYSPWDFILE) e_$(ORDPWDFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + + +### YAML FILE SECTION ### +operator: + $(CP) ${ORACLE_OPERATOR_YAML} . + ${CP} `basename ${ORACLE_OPERATOR_YAML}` `basename ${ORACLE_OPERATOR_YAML}`.ORG + $(SED) -i 's/value: ""/value: $(OPRNAMESPACE)/g' `basename ${ORACLE_OPERATOR_YAML}` + + +define _script00 +cat < authsection01.yaml + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + +cat< authsection02.yaml + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + + +cat < 
${OPRNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: ${OPRNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +cat < ${OPRNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: ${OPRNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +endef +export script00 = $(value _script00) +secyaml: + @ eval "$$script00" + +#echo ords pod creation +define _script01 +cat < ${ORDS_POD} +apiVersion: database.oracle.com/${APIVERSION} +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: ${ORDSIMG} + ordsImagePullPolicy: "Always" + dbTnsurl : ${TNSALIAS} + replicas: 1 + deletePdbCascade: true +EOF + +cat authsection01.yaml >> ${ORDS_POD} + +endef +export script01 = $(value _script01) + + +define _script02 + +cat <${PDBCRE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat < ${PDBCRE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" 
+ cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat <${PDBOPEN1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBCLOSE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${PDBCLOSE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${PDBCLOSE3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 
+ namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: ""new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat < ${PDBCLONE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" +EOF + +cat < ${PDBCLONE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb4 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" +EOF + + +cat < ${PDBDELETE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${PDBDELETE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${PDBUNPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" +EOF + +cat <${PDBPLUG1} 
+apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertivePdbDeletion: true + action: "Plug" +EOF + +cat <${PDBMAP1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + +cat <${PDBMAP2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +cat <${PDBMAP3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +## Auth information +for _file in ${PDBCRE1} ${PDBCRE2} ${PDBOPEN1} ${PDBOPEN2} ${PDBOPEN3} ${PDBCLOSE1} ${PDBCLOSE2} ${PDBCLOSE3} ${PDBCLONE1} ${PDBCLONE2} ${PDBDELETE1} ${PDBDELETE2} ${PDBUNPLUG1} ${PDBPLUG1} ${PDBMAP1} ${PDBMAP2} ${PDBMAP3} +do +ls -ltr ${_file} + cat authsection02.yaml >> ${_file} +done +rm authsection02.yaml +rm authsection01.yaml +endef + +export script02 = $(value _script02) + 
+genyaml: secyaml + @ eval "$$script01" + @ eval "$$script02" + +cleanyaml: + - $(RM) $(PDBMAP3) $(PDBMAP2) $(PDBMAP1) $(PDBPLUG1) $(PDBUNPLUG1) $(PDBDELETE2) $(PDBDELETE1) $(PDBCLONE2) $(PDBCLONE1) $(PDBCLOSE3) $(PDBCLOSE2) $(PDBCLOSE1) $(PDBOPEN3) $(PDBOPEN2) $(PDBOPEN1) $(PDBCRE2) $(PDBCRE1) $(ORDS_POD) $(CDB_SECRETS) $(PDB_SECRETS) + - $(RM) ${OPRNAMESPACE}_binding.yaml ${OPRNAMESPACE}_binding.yaml + + +cleancrt: + - $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl + + +################# +### PACKAGING ### +################# + +pkg: + - $(RM) -rf /tmp/pkgtestplan + $(MKDIR) /tmp/pkgtestplan + $(CP) -R * /tmp/pkgtestplan + $(CP) ../../../../oracle-database-operator.yaml /tmp/pkgtestplan/ + $(TAR) -C /tmp -cvf ~/pkgtestplan_$(DATE).tar pkgtestplan + +################ +### diag ### +################ + +login: + $(KUBECTL) exec `$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep ords|cut -d ' ' -f 1` -n $(OPRNAMESPACE) -it -- /bin/bash + + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 
1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + +####################################################### +#### TEST SECTION #### +####################################################### + +run00: + @$(call msg,"cdb pod creation") + - $(KUBECTL) delete cdb cdb-dev -n $(OPRNAMESPACE) + $(KUBECTL) apply -f $(ORDS_POD) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" cdb cdb-dev -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"cdb pod completed") + $(KUBECTL) get cdb -n $(OPRNAMESPACE) + $(KUBECTL) get pod -n $(OPRNAMESPACE) + +run01.1: + @$(call msg,"pdb pdb1 creation") + $(KUBECTL) apply -f $(PDBCRE1) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 creation completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run01.2: + @$(call msg, "pdb pdb2 creation") + $(KUBECTL) apply -f $(PDBCRE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb2 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb2 creation completed") + $(KUBECTL) get pdb pdb2 -n $(OPRNAMESPACE) + +run02.1: + @$(call msg, "pdb pdb1 open") + $(KUBECTL) apply -f $(PDBOPEN1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 open completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run02.2: + @$(call msg,"pdb pdb2 open") + $(KUBECTL) apply -f $(PDBOPEN2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb2 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 open completed") 
+ $(KUBECTL) get pdb pdb2 -n $(OPRNAMESPACE) + + +run03.1: + @$(call msg,"clone pdb1-->pdb3") + $(KUBECTL) apply -f $(PDBCLONE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb3 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb1-->pdb3 completed") + $(KUBECTL) get pdb pdb3 -n $(OPRNAMESPACE) + + +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(PDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb4 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get pdb pdb3 -n $(OPRNAMESPACE) + + +run04.1: + @$(call msg,"pdb pdb1 close") + $(KUBECTL) apply -f $(PDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 close completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run04.2: + @$(call msg,"pdb pdb2 close") + $(KUBECTL) apply -f $(PDBCLOSE2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb2 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 close completed") + $(KUBECTL) get pdb pdb2 -n $(OPRNAMESPACE) + +run05.1: + @$(call msg,"pdb pdb1 unplug") + $(KUBECTL) apply -f $(PDBUNPLUG1) + $(KUBECTL) wait --for=delete pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb1 unplug completed") + +run06.1: + @$(call msg, "pdb pdb1 plug") + $(KUBECTL) apply -f $(PDBPLUG1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 plug completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run07.1: + @$(call msg,"pdb pdb1 delete ") + - $(KUBECTL) apply -f $(PDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) apply -f $(PDBDELETE1) + $(KUBECTL) wait --for=delete pdb pdb1 -n 
$(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb1 delete") + $(KUBECTL) get pdb -n $(OPRNAMESPACE) + +run99.1: + $(KUBECTL) delete cdb cdb-dev -n cdbnamespace + $(KUBECTL) wait --for=delete cdb cdb-dev -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) get cdb -n cdbnamespaace + $(KUBECTL) get pdb -n pdbnamespaace + + +## SEQ | ACTION +## ----+---------------- +## 00 | create ords pod +## 01 | create pdb +## 02 | open pdb +## 03 | clone pdb +## 04 | close pdb +## 05 | unpug pdb +## 06 | plug pdb +## 07 | delete pdb (declarative) + + +runall01: run00 run01.1 run01.2 run03.1 run03.2 run04.1 run05.1 run06.1 run02.1 run07.1 + + +###### BUILD ORDS IMAGE ###### + +createimage: + $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR) + +createimageproxy: + $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR) --build-arg https_proxy=$(HTTPS_PROXY) --build-arg http_proxy=$(HTTP_PROXY) + +tagimage: + @echo "TAG IMAGE" + $(RUNTIME) tag $(IMAGE) $(ORDSIMG) + +push: + $(RUNTIME) push $(ORDSIMG) + + diff --git a/docs/multitenant/ords-based/usecase01/map_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/map_pdb1_resource.yaml new file mode 100644 index 00000000..18cb35b1 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/map_pdb1_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + 
webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/map_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/map_pdb2_resource.yaml new file mode 100644 index 00000000..85899597 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/map_pdb2_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/map_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase01/map_pdb3_resource.yaml new file mode 100644 index 00000000..9c2c1cd3 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/map_pdb3_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone" + assertivePdbDeletion: true + fileNameConversions: 
"NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/open_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/open_pdb1_resource.yaml new file mode 100644 index 00000000..63a0a49c --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/open_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/open_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/open_pdb2_resource.yaml new file mode 100644 index 00000000..8c4eed0d --- /dev/null +++ 
b/docs/multitenant/ords-based/usecase01/open_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/open_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase01/open_pdb3_resource.yaml new file mode 100644 index 00000000..5f0e4b77 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/open_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" 
+ key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/oracle-database-operator-system_binding.yaml b/docs/multitenant/ords-based/usecase01/oracle-database-operator-system_binding.yaml new file mode 100644 index 00000000..79e44269 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/oracle-database-operator-system_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: oracle-database-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/ords-based/usecase01/oracle-database-operator.yaml b/docs/multitenant/ords-based/usecase01/oracle-database-operator.yaml new file mode 120000 index 00000000..d5bae7bc --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/oracle-database-operator.yaml @@ -0,0 +1 @@ +../../../oracle-database-operator.yaml \ No newline at end of file diff --git a/docs/multitenant/ords-based/usecase01/parameters.txt b/docs/multitenant/ords-based/usecase01/parameters.txt new file mode 100644 index 00000000..0a7b394a --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/parameters.txt @@ -0,0 +1,61 @@ + +######################## +## REST SERVER IMAGE ### +######################## + +ORDSIMG:_your_container_registry/ords-dboper:latest + +############################## +## TNS URL FOR CDB CREATION ## +############################## +TNSALIAS:"T H I S I S J U S T A N E X A M P L E 
....(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + +########################################### +## ORDS PUBLIC USER ## +########################################### +ORDPWD:Change_me_please + +########################################### +## SYSPASSWORD ## +########################################### +SYSPWD:Change_me_please + +####################### +## HTTPS CREDENTIAL ### +####################### + +WBUSER:Change_me_please +WBPASS:Change_me_please + +##################### +## PDB ADMIN USER ### +##################### + +PDBUSR:Change_me_please +PDBPWD:Change_me_please + +##################### +## CDB ADMIN USER ### +##################### + +CDBUSR:C##DBAPI_CDB_ADMIN +CDBPWD:Change_me_please + +################### +### NAMESPACES #### +################### + +PDBNAMESPACE:pdbnamespace +CDBNAMESPACE:cdbnamespace + +#################### +### COMPANY NAME ### +#################### + +COMPANY:oracle + +#################### +### APIVERSION ### +#################### + +APIVERSION:v4 diff --git a/docs/multitenant/ords-based/usecase01/pdb_close.yaml b/docs/multitenant/ords-based/usecase01/pdb_close.yaml new file mode 100644 index 00000000..5917d33a --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/pdb_close.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + 
key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + diff --git a/docs/multitenant/ords-based/usecase01/pdb_create.yaml b/docs/multitenant/ords-based/usecase01/pdb_create.yaml new file mode 100644 index 00000000..be3581ad --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/pdb_create.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + fileNameConversions: "NONE" + tdeImport: false + totalSize: "1G" + tempSize: "100M" + action: "Create" + assertivePdbDeletion: true + diff --git a/docs/multitenant/ords-based/usecase01/pdb_delete.yaml b/docs/multitenant/ords-based/usecase01/pdb_delete.yaml new file mode 100644 index 00000000..c22b546a --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/pdb_delete.yaml @@ -0,0 +1,34 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: 
"oracle-database-operator-system" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + diff --git a/docs/multitenant/ords-based/usecase01/pdb_map.yaml b/docs/multitenant/ords-based/usecase01/pdb_map.yaml new file mode 100644 index 00000000..3300a7fa --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/pdb_map.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + assertivePdbDeletion: true diff --git a/docs/multitenant/ords-based/usecase01/pdb_open.yaml b/docs/multitenant/ords-based/usecase01/pdb_open.yaml new file mode 100644 index 00000000..25fdccc4 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/pdb_open.yaml @@ -0,0 +1,43 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + 
cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" diff --git a/docs/multitenant/ords-based/usecase01/pdb_secret.yaml b/docs/multitenant/ords-based/usecase01/pdb_secret.yaml new file mode 100644 index 00000000..60d95d76 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/pdb_secret.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: pdb1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + sysadmin_user: ".....base64 encoded password...." + sysadmin_pwd: ".....base64 encoded password...." + webserver_user: ".....base64 encoded password...." + webserver_pwd: ".....base64 encoded password...." 
+ diff --git a/docs/multitenant/ords-based/usecase01/plug_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/plug_pdb1_resource.yaml new file mode 100644 index 00000000..0e86e10c --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/plug_pdb1_resource.yaml @@ -0,0 +1,53 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertivePdbDeletion: true + action: "Plug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/server.csr b/docs/multitenant/ords-based/usecase01/server.csr new file mode 100644 index 00000000..e308d301 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/server.csr @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIC3TCCAcUCAQAwgZcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh +MRUwEwYDVQQHDAxTYW5GcmFuY2lzY28xEDAOBgNVBAoMB29yYWNsZSAxNjA0BgNV +BAMMLWNkYi1kZXYtb3Jkcy5vcmFjbGUtZGF0YWJhc2Utb3BlcmF0b3Itc3lzdGVt +IDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAm9nlNSQNsPTVqH57MkWKZEyaVtzVKQ8Z3oDK6hWXfB24p0jVj6sTOJkf +NVAxnqmU8DpW3odpbU6qWe/n+B5vJpqdXUGdsq9NKyus2fGb/xf1UnskpA2FUuWZ 
+o3upyCFxDAOvE4eZUzlxIn+54XXaNAdQiU9E8VXPr5YxrvZ15T/xCXLtJPs/RCOF +cJ8+gvZGcjMbdP16auJDVWZzBaur3eKbiHN7LXNCCRzGO++dv0kGY8vH7MyFfgp3 +qYBiSHS3WDiFUJjYIvfa8lLfP1hnlCyHn8TnU9gjGjmd1YcccSKqWIAT24wPUKVU +Lme4n91jxDPp7g8nRtDw0Smj9gYCtQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEB +AGOG/9IJJRvT2JLcuzE5Arai1XHc6Jh65iuDRqXQav47Bz38FFF2gZNO69gzDmhq +6k7tie+5bPcAHuuJZ0dAa71a9SLjKl+XNkkI0vS6te6OK3DCVUoMqNCk5VdwrJw0 +RORbKUwgLEG6mu80Gc/6wCdeR/36hoYTMeNPjm6M9e+X5ppsXqxCNsgDxasJFT82 +FejuJE2sZ6RCradlDToUHNS1dMLoW0WAIISqOmrDvEI6snm9ZZr3Sxo1auEtpI6v +NllBM4AgEghy/2mAtke+By4WHCfXBpxEGv9S7ATqJHYrR5Qa3nwx0eojWW1vmn0/ +aEzslX1tAH6oz2jA6QZ0sNo= +-----END CERTIFICATE REQUEST----- diff --git a/docs/multitenant/ords-based/usecase01/tde_secret.yaml b/docs/multitenant/ords-based/usecase01/tde_secret.yaml new file mode 100644 index 00000000..7cf66c03 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/tde_secret.yaml @@ -0,0 +1,17 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: v1 +kind: Secret +metadata: + name: tde1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + tdepassword: "bW1hbHZlenoK" + tdesecret: "bW1hbHZlenoK" + + + + diff --git a/docs/multitenant/ords-based/usecase01/tls.crt b/docs/multitenant/ords-based/usecase01/tls.crt new file mode 100644 index 00000000..6bf8aef4 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/tls.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFDCCAvygAwIBAgIUd9l6tMS21ak3e4S0VdPhY0jG3gQwDQYJKoZIhvcNAQEL +BQAwgaExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQH +DAxTYW5GcmFuY2lzY28xEDAOBgNVBAoMB29yYWNsZSAxNjA0BgNVBAMMLWNkYi1k +ZXYtb3Jkcy5vcmFjbGUtZGF0YWJhc2Utb3BlcmF0b3Itc3lzdGVtIDEcMBoGA1UE +AwwTbG9jYWxob3N0ICBSb290IENBIDAeFw0yNDA4MTIxNTMyMzVaFw0yNTA4MTIx +NTMyMzVaMIGXMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMG +A1UEBwwMU2FuRnJhbmNpc2NvMRAwDgYDVQQKDAdvcmFjbGUgMTYwNAYDVQQDDC1j +ZGItZGV2LW9yZHMub3JhY2xlLWRhdGFiYXNlLW9wZXJhdG9yLXN5c3RlbSAxEjAQ +BgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AJvZ5TUkDbD01ah+ezJFimRMmlbc1SkPGd6AyuoVl3wduKdI1Y+rEziZHzVQMZ6p +lPA6Vt6HaW1Oqlnv5/gebyaanV1BnbKvTSsrrNnxm/8X9VJ7JKQNhVLlmaN7qcgh +cQwDrxOHmVM5cSJ/ueF12jQHUIlPRPFVz6+WMa72deU/8Qly7ST7P0QjhXCfPoL2 +RnIzG3T9emriQ1VmcwWrq93im4hzey1zQgkcxjvvnb9JBmPLx+zMhX4Kd6mAYkh0 +t1g4hVCY2CL32vJS3z9YZ5Qsh5/E51PYIxo5ndWHHHEiqliAE9uMD1ClVC5nuJ/d +Y8Qz6e4PJ0bQ8NEpo/YGArUCAwEAAaNMMEowSAYDVR0RBEEwP4IsY2RiLWRldi1v +cmRzLm9yYWNsZS1kYXRhYmFzZS1vcGVyYXRvci1zeXN0ZW2CD3d3dy5leGFtcGxl +LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAh7Lsu2ITS6Bc2q/Ef4No5Us0Vo9BWKoL +AlrfQPjsv1erMGsyEEyZ0Cg8l3QrXlscQ1ESvx0BnRGjoqZGE4+PoVZTEYSkokXP +aAr69epPzXQRyyAGCg5GeL6IFAj1AzqJGNnKOrPaLpcTri4MboiWmW+MHmgLdyPK +iwl8bNa8841nK/L/m6QET15BI+MIAvn7pgcpztum5jmkB+eceXzXnKUGg77TaFiX +bXqVBR4EvexC4DgUfQJI4zJLFdcH/GHxCpaaXNjbXeVz1ZK/qo2TCrXp2UXVrznU +9VTUuCaQA2VYZCitvAbupt+1OvMFYhWiIAroJSmzrvH4oK+IXgY6GA== +-----END CERTIFICATE----- diff --git a/docs/multitenant/ords-based/usecase01/tls.key 
b/docs/multitenant/ords-based/usecase01/tls.key new file mode 100644 index 00000000..666c5639 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/tls.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCb2eU1JA2w9NWo +fnsyRYpkTJpW3NUpDxnegMrqFZd8HbinSNWPqxM4mR81UDGeqZTwOlbeh2ltTqpZ +7+f4Hm8mmp1dQZ2yr00rK6zZ8Zv/F/VSeySkDYVS5Zmje6nIIXEMA68Th5lTOXEi +f7nhddo0B1CJT0TxVc+vljGu9nXlP/EJcu0k+z9EI4Vwnz6C9kZyMxt0/Xpq4kNV +ZnMFq6vd4puIc3stc0IJHMY7752/SQZjy8fszIV+CnepgGJIdLdYOIVQmNgi99ry +Ut8/WGeULIefxOdT2CMaOZ3VhxxxIqpYgBPbjA9QpVQuZ7if3WPEM+nuDydG0PDR +KaP2BgK1AgMBAAECggEAKUwl1l0FW7yk2Q8a6glPUKCTzSybN1QPEMyj+D9ccsEV +aw57uKQmZbr9cA0d+OMK2lU7K6BKKXLM5SQTHcZCwcH6rPl0JiMZmbTrCp1hLslU +clS7MtV6XKsGeTGNncBuyjY3sD8gO9NezTt3L+0gsuS1TI06wZBxhh+QbsJUHzjW +bC3mNjD4SqXree4Snp05nlFaT2s2isIjj25mKDwBu8IX0BN2VjsaSiQcjb8Dmzmu +42Xh7bcWBebns8Ehuq9TIl6ZjQht+pmVOMlB862baVpW/9CxkknzM+UQhIkXTSJk +Jt/mGeO89V4/Zh2N4ixIOE1hw87EvRFBoYh2VF58QQKBgQDMujXYblh+eEdsB1LG +kY0LerFHuQgdzifYmjPl0jtBsWDmh5i6q9PRUs2JZ/Fsq4QMQ8SLinGzaIBq5FKr +CL067X5blrFA9H0D6exJI3iHBTQpeMFwtqvu3j+zpCmgzonaUDQrczUpc0hxU7YI +/jhDe9LSWknPrzzMoWWKuy0sTQKBgQDC4g8F2krqm9Q5ug8bRKTAvMrY0skFIwrP +5LXBq9C8YCnLnT4S4tYQfbnWaBeG7YpkkmkZe30c9MUjsr1OHZbo+jlxHBU+oRYZ +e1j0UorVGt7FfNe/zjW0fLd72CBO741EDvV6pVeItkAwH6P5/cbRu085dwvyFbxv +JmOaYddECQKBgQCuid6YG1NE10SE3CV89uAZtktny18ZEgY0ixrNx5MPaaskPtw9 +4Xofjol+qOhR7lQQpMHu+WQAQYqiFvBHspapo4pDiVCrAQWIDamNnTkHW69h3/qD +HqmsZzxF6iI3X351akVf+cOMCCXtwCGEvz+2gN12ytT8w/iAuOS6BuP3TQKBgBlf +v57+diSn13EQtajSPjVOH4ctorjFgEHjQHsP+OSeDLMTLSLeYArTo9+zu+R4hz1j +BsYnmvmrMQPd4OIL3jtFYTdF9coqxSraMZHWMXdfwUOrZpf1rG5skqNQV5yPejAz +Vmj6oDQPrrnVVM9W6I0kO0N7KZYCmH9MW0mdlZ6pAoGAB60f2sk35VUBpvh7qzTY +70WDbNnCCU3I3KZ7LCUwUPWzGLQwMXRlAb5ZMheT/SGPChX4QXCNUCjXkR3Am3NO +yURHqZIRy0bwZRVjYnlCtc9YQ8pB0isZ1z2a9FXRD75o2WboFZ+VsG0FU81IE2ZO +gW802gT76NRnz851B7/nFNs= +-----END PRIVATE KEY----- diff --git a/docs/multitenant/ords-based/usecase01/unplug_pdb1_resource.yaml 
b/docs/multitenant/ords-based/usecase01/unplug_pdb1_resource.yaml new file mode 100644 index 00000000..61fe915d --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/unplug_pdb1_resource.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase02/README.md b/docs/multitenant/ords-based/usecase02/README.md new file mode 100644 index 00000000..39978747 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/README.md @@ -0,0 +1,523 @@ + + + +# UNPLUG - PLUG - CLONE + +- [UNPLUG - PLUG - CLONE](#unplug---plug---clone) + - [INTRODUCTION](#introduction) + - [UNPLUG DATABASE](#unplug-database) + - [PLUG DATABASE](#plug-database) + - [CLONE PDB](#clone-pdb) + +### INTRODUCTION + +> ☞ The examples of this folder are based on single namespace **oracle-database-operator-system** + +This page explains how to plug and unplug database a pdb; it assumes that you have already configured a pluggable database (see [usecase01](../usecase01/README.md)). Check yaml parameters in the CRD tables in the main [README](../README.md) file. 
+ +```text + + + +--------------------------------+ + UNPLUG PDB PLUG PDB | CLONE PDB | + | | + +-----------+ +-----------+ | +-----------+ +----------+ | + | PDB | | PDB | | | PDB | |CLONED PDB| | + +----+------+ +----+------+ | +----+------+ +----------+ | + | | | | | | ++----> UNPLUG -----+ +--> PLUG | CLONE ---------+ | +| | | | | | | | +| +----+------+ | | +----+------+ | +----+------+ | +| | Container | | | | Container | | | Container | | +| | | | | | | | | | | +| +-----------+ | | +-----------+ | +-----------+ | +| | | | | +| +------+----+ | | kubectk apply -f pdb_clone.yaml| +| | | | | | +| +------|-----------|--------+ | +--------------------------------+ +| | +----+----+ +--+------+ | | +| | |xml file | |DB FILES | |--+ +| | +---------+ +---------+ | | +| +---------------------------+ | +| | +| | ++- kubectl apply -f pdb_unplug.yaml | + | + kubectl apply -f pdb_plug.yaml-----+ +``` + +### UNPLUG DATABASE + +Use the following command to check kubernets pdb resources. Note that the output of the commands can be tailored to meet your needs. Just check the structure of pdb resource **kubectl get pdbs -n oracle-database-operator-system -o=json** and modify the script accordingly. For the sake of simplicity put this command in a single script **checkpdbs.sh**. 
+ +```bash +kubectl get pdbs -n oracle-database-operator-system -o=jsonpath='{range .items[*]} +{"\n==================================================================\n"} +{"CDB="}{.metadata.labels.cdb} +{"K8SNAME="}{.metadata.name} +{"PDBNAME="}{.spec.pdbName} +{"OPENMODE="}{.status.openMode} +{"ACTION="}{.status.action} +{"MSG="}{.status.msg} +{"\n"}{end}' +``` + +We assume that the pluggable database pdbdev is already configured and opened in read write mode + +```bash +./checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE=READ WRITE +ACTION=CREATE +MSG=Success + +``` + +Prepare a new yaml file **pdb_unplug.yaml** to unplug the pdbdev database. Make sure that the path of the xml file is correct and check the existence of all the required secrets. Do not reuse an existing xml files. + +```yaml +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +#pdb_unplug.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdbunplug.xml" + action: "Unplug" + [ secret sections ] +``` + +Close the pluggable database by applying the following yaml file **pdb_close.yaml** + +```yaml +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +#pdb_close.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + [secret section] +``` + +```bash +kubectl apply -f pdb_close.yaml +pdb.database.oracle.com/pdb1 configured + +sh checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE=MOUNTED +ACTION=MODIFY +MSG=Success +``` +After that apply the unplug file **pdb_unplug.yaml** ; The resource is no longer available once the unplug operation is completed. + +```bash +kubectl apply -f pdb_unplug.yaml +pdb.database.oracle.com/pdb1 configured + +sh checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE=MOUNTED +ACTION=MODIFY +MSG=Waiting for PDB to be unplugged +``` + +Check kubernets log files and the database alert log + +```text +/usr/bin/kubectl logs -f pod/`/usr/bin/kubectl get pods -n oracle-database-operator-system|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n oracle-database-operator-system +[...] 
+base-oracle-com-v1alpha1-pdb", "UID": "6f469423-85e5-4287-94d5-3d91a04b621e", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} +2023-01-03T14:04:05Z INFO pdb-webhook ValidateUpdate-Validating PDB spec for : pdb1 +2023-01-03T14:04:05Z INFO pdb-webhook validateCommon {"name": "pdb1"} +2023-01-03T14:04:05Z INFO pdb-webhook Valdiating PDB Resource Action : UNPLUG +2023-01-03T14:04:05Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "6f469423-85e5-4287-94d5-3d91a04b621e", "allowed": true} + + +[database alert log] +Domain Action Reconfiguration complete (total time 0.0 secs) +Completed: ALTER PLUGGABLE DATABASE "pdbdev" UNPLUG INTO '/tmp/pdbunplug.xml' +DROP PLUGGABLE DATABASE "pdbdev" KEEP DATAFILES +2023-01-03T14:04:05.518845+00:00 +Deleted Oracle managed file +DATA/DB12/F146D9482AA0260FE0531514000AB1BC/TEMPFILE/temp.266.1125061101 +2023-01-03T14:04:05.547820+00:00 +Stopped service pdbdev +Completed: DROP PLUGGABLE DATABASE "pdbdev" KEEP DATAFILES + +``` + + +login to the server and check xml file existence. Verify the datafile path on the ASM filesystem. + +```bash +ls -ltr /tmp/pdbunplug.xml +-rw-r--r--. 1 oracle asmadmin 8007 Jan 3 14:04 /tmp/pdbunplug.xml +[..] +cat /tmp/pdbunplug.xml |grep path + +DATA/DB12/F146D9482AA0260FE0531514000AB1BC/DATAFILE/system.353.1125061021 + +DATA/DB12/F146D9482AA0260FE0531514000AB1BC/DATAFILE/sysaux.328.1125061021 + +DATA/DB12/F146D9482AA0260FE0531514000AB1BC/DATAFILE/undotbs1.347.1125061021 + +DATA/DB12/F146D9482AA0260FE0531514000AB1BC/TEMPFILE/temp.266.1125061101 + +DATA/DB12/F146D9482AA0260FE0531514000AB1BC/DATAFILE/undo_2.318.1125061021 +[..] 
+asmcmd ls -l +DATA/DB12/F146D9482AA0260FE0531514000AB1BC/DATAFILE/system.353.1125061021 +Type Redund Striped Time Sys Name +DATAFILE UNPROT COARSE JAN 03 14:00:00 Y system.353.1125061021 +``` + +### PLUG DATABASE + +Prepare a new yaml file **pdb_plug.yaml** to plug the database back into the container. + +```yaml +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +# pdb_plug.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdbunplug.xml" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + action: "Plug" + [secrets section] +``` +Apply **pdb_plug.yaml** + +```bash +kubectl apply -f pdb_plug.yaml +[...] +sh checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE= +ACTION= +MSG=Waiting for PDB to be plugged +[...] 
+sh checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE=READ WRITE +ACTION=PLUG +MSG=Success +``` + +Check kubernets log files and the database alert log + +```text +/usr/bin/kubectl logs -f pod/`/usr/bin/kubectl get pods -n oracle-database-operator-system|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n oracle-database-operator-system + +2023-01-03T14:33:51Z INFO pdb-webhook ValidateCreate-Validating PDB spec for : pdb1 +2023-01-03T14:33:51Z INFO pdb-webhook validateCommon {"name": "pdb1"} +2023-01-03T14:33:51Z INFO pdb-webhook Valdiating PDB Resource Action : PLUG +2023-01-03T14:33:51Z INFO pdb-webhook PDB Resource : pdb1 successfully validated for Action : PLUG +2023-01-03T14:33:51Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "fccac7ba-7540-42ff-93b2-46675506a098", "allowed": true} +2023-01-03T14:34:16Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "766dadcc-aeea-4a80-bc17-e957b4a44d3c", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} +2023-01-03T14:34:16Z INFO pdb-webhook Setting default values in PDB spec for : pdb1 +2023-01-03T14:34:16Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "766dadcc-aeea-4a80-bc17-e957b4a44d3c", "allowed": true} + +[database alert log] +... 
+All grantable enqueues granted +freeing rdom 3 +freeing the fusion rht of pdb 3 +freeing the pdb enqueue rht +Domain Action Reconfiguration complete (total time 0.0 secs) +Completed: CREATE PLUGGABLE DATABASE "pdbdev" + USING '/tmp/pdbunplug.xml' + SOURCE_FILE_NAME_CONVERT=NONE + MOVE + FILE_NAME_CONVERT=NONE + STORAGE UNLIMITED TEMPFILE REUSE + +2023-01-03T14:35:41.500186+00:00 +ALTER PLUGGABLE DATABASE "pdbdev" OPEN READ WRITE INSTANCES=ALL +2023-01-03T14:35:41.503482+00:00 +PDBDEV(3):Pluggable database PDBDEV opening in read write +PDBDEV(3):SUPLOG: Initialize PDB SUPLOG SGA, old value 0x0, new value 0x18 +PDBDEV(3):Autotune of undo retention is turned on +... +``` +### CLONE PDB + +Prepare and apply a new yaml file **pdb_clone.yaml** to clone the existing pluggable database. + +```yaml +#pdb_clone.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdb2-clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + action: "Clone" + [secret section] + +``` +```bash +kubectl apply -f pdb_clone.yaml +pdb.database.oracle.com/pdb2 created +[oracle@mitk01 https.ords.22]$ sh checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE=READ WRITE +ACTION=PLUG +MSG=Success +================================================================== +CDB=cdb-dev +K8SNAME=pdb2 +PDBNAME=pdb2-clone +OPENMODE= +ACTION= +MSG=Waiting for PDB to be cloned +[...] +[.wait sometimes..] 
+ sh checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE=READ WRITE +ACTION=PLUG +MSG=Success +================================================================== +CDB=cdb-dev +K8SNAME=pdb2 +PDBNAME=pdb2-clone +OPENMODE=READ WRITE +ACTION=CLONE +MSG=Success +``` +log info + +```text +[kubernets log] +2023-01-03T15:13:31Z INFO pdb-webhook - asClone : false +2023-01-03T15:13:31Z INFO pdb-webhook - getScript : false +2023-01-03T15:13:31Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "7c17a715-7e4e-47d4-ad42-dcb37526bb3e", "allowed": true} +2023-01-03T15:13:31Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "11e0d49c-afaa-47ac-a301-f1fdd1e70173", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} +2023-01-03T15:13:31Z INFO pdb-webhook ValidateCreate-Validating PDB spec for : pdb2 +2023-01-03T15:13:31Z INFO pdb-webhook validateCommon {"name": "pdb2"} +2023-01-03T15:13:31Z INFO pdb-webhook Valdiating PDB Resource Action : CLONE +2023-01-03T15:13:31Z INFO pdb-webhook PDB Resource : pdb2 successfully validated for Action : CLONE +2023-01-03T15:13:31Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "11e0d49c-afaa-47ac-a301-f1fdd1e70173", "allowed": true} + +[database alert log] +Domain Action Reconfiguration complete (total time 0.0 secs) +2023-01-03T15:15:00.670436+00:00 +Completed: CREATE PLUGGABLE DATABASE "pdb2-clone" FROM "pdbdev" + STORAGE UNLIMITED + TEMPFILE REUSE + FILE_NAME_CONVERT=NONE +ALTER PLUGGABLE DATABASE "pdbdev" CLOSE IMMEDIATE INSTANCES=ALL +2023-01-03T15:15:00.684271+00:00 +PDBDEV(3):Pluggable database PDBDEV closing 
+PDBDEV(3):JIT: pid 8235 requesting stop +PDBDEV(3):Buffer Cache flush started: 3 +PDBDEV(3):Buffer Cache flush finished: 3 + +``` +### UNPLUG AND PLUG WITH TDE + + + + +> ⚠ __WARNING FOR THE TDE USERS__ ⚠ According to the [ords documentation](https://docs.oracle.com/en/database/oracle/oracle-database/21/dbrst/op-database-pdbs-pdb_name-post.html) the plug and unplug operations with TDE are supported only if ORDS runs on the same host as the database, which is not the case for the operator, where ORDS runs in an isolated pod. Do not use the pdb controller for unplug and plug operations with TDE in production environments. + + + +You can unplug and plug a database with TDE; in order to do that, you have to specify a key store path and create a new Kubernetes secret for TDE using the following yaml file, **tde_secret.yaml**. + +```yaml +#tde_secret +apiVersion: v1 +kind: Secret +metadata: + name: tde1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + tdepassword: "...." + tdesecret: "...." 
+``` + +```bash +kubectl apply -f tde_secret.yaml +``` + +The files to unplug and plug a database with TDE are the following: + + +```yaml +#pdb_unplugtde.yaml +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: pdb1-secret + key: "sysadmin_user" + adminPwd: + secret: + secretName: pdb1-secret + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + tdePassword: + secret: + secretName: "tde1-secret" + key: "tdepassword" + tdeSecret: + secret: + secretName: "tde1-secret" + key: "tdesecret" + totalSize: 1G + tempSize: 1G + unlimitedStorage: true + reuseTempFile: true + fileNameConversions: NONE + action: "Unplug" + xmlFileName: "/home/oracle/unplugpdb.xml" + tdeExport: true +``` + +```yaml +#pdb_plugtde.yaml +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: pdb1-secret + key: "sysadmin_user" + adminPwd: + secret: + secretName: pdb1-secret + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + tdePassword: + secret: + secretName: "tde1-secret" + key: "tdepassword" + tdeSecret: + secret: + secretName: "tde1-secret" + key: "tdesecret" + totalSize: 1G + tempSize: "100M" + unlimitedStorage: true + reuseTempFile: true + fileNameConversions: NONE + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + action: "Plug" + 
xmlFileName: /home/oracle/unplugpdb.xml + tdeImport: true + tdeKeystorePath: /home/oracle/keystore + +``` + + + + + + diff --git a/docs/multitenant/ords-based/usecase02/pdb_clone.yaml b/docs/multitenant/ords-based/usecase02/pdb_clone.yaml new file mode 100644 index 00000000..5723f7c6 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_clone.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase02/pdb_plug.yaml b/docs/multitenant/ords-based/usecase02/pdb_plug.yaml new file mode 100644 index 00000000..9eb5ed77 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_plug.yaml @@ -0,0 +1,53 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertivePdbDeletion: true + action: "Plug" + adminName: + secret: 
+ secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase02/pdb_plugtde.yaml b/docs/multitenant/ords-based/usecase02/pdb_plugtde.yaml new file mode 100644 index 00000000..995be538 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_plugtde.yaml @@ -0,0 +1,56 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: pdb1-secret + key: "sysadmin_user" + adminPwd: + secret: + secretName: pdb1-secret + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + tdePassword: + secret: + secretName: "tde1-secret" + key: "tdepassword" + tdeSecret: + secret: + secretName: "tde1-secret" + key: "tdesecret" + totalSize: 1G + tempSize: "100M" + unlimitedStorage: true + reuseTempFile: true + fileNameConversions: NONE + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + action: "Plug" + xmlFileName: /home/oracle/unplugpdb.xml + tdeImport: true + tdeKeystorePath: /home/oracle/keystore + diff --git a/docs/multitenant/ords-based/usecase02/pdb_unplug.yaml 
b/docs/multitenant/ords-based/usecase02/pdb_unplug.yaml new file mode 100644 index 00000000..0036d5f7 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_unplug.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase02/pdb_unplugtde.yaml b/docs/multitenant/ords-based/usecase02/pdb_unplugtde.yaml new file mode 100644 index 00000000..2eacc5b7 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_unplugtde.yaml @@ -0,0 +1,54 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# + +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: pdb1-secret + key: "sysadmin_user" + adminPwd: + secret: + secretName: pdb1-secret + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + tdePassword: + secret: + secretName: "tde1-secret" + key: "tdepassword" + tdeSecret: + secret: + secretName: "tde1-secret" + key: "tdesecret" + totalSize: 1G + tempSize: 1G + unlimitedStorage: true + reuseTempFile: true + fileNameConversions: NONE + action: "Unplug" + xmlFileName: "/home/oracle/unplugpdb.xml" + tdeExport: true + tdeKeystorePath: "/home/oracle/keystore" + diff --git a/docs/multitenant/provisioning/ords_image.md b/docs/multitenant/provisioning/ords_image.md new file mode 100644 index 00000000..e2d1dcef --- /dev/null +++ b/docs/multitenant/provisioning/ords_image.md @@ -0,0 +1,81 @@ + + +# Build ORDS Docker Image + +This file contains the steps to create an ORDS-based image to be used solely by the PDB life cycle multitenant controllers. + +**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. + +#### Clone the software using git: + +> Under directory ./oracle-database-operator/ords you will find the [Dockerfile](../../../ords/Dockerfile) and [runOrdsSSL.sh](../../../ords/runOrdsSSL.sh) required to build the image. 
+ +```sh + git clone git@orahub.oci.oraclecorp.com:rac-docker-dev/oracle-database-operator.git + cd oracle-database-operator/ords/ +``` + +#### Login to the registry: container-registry.oracle.com + +**NOTE:** To log in to this registry, you will need to visit the URL https://container-registry.oracle.com , sign in, then click on "Java" and then accept the agreement. + +```bash +docker login container-registry.oracle.com +``` + +#### Login to your container registry + +Log in to a repo where you want to push your docker image (if needed) to pull during deployment in your environment. + +```bash +docker login +``` + +#### Build the image + +Build the docker image by using the below command: + +```bash +docker build -t oracle/ords-dboper:latest . +``` +> If you are working behind a proxy, remember to specify https_proxy and http_proxy during image creation + +Check the docker image details using: + +```bash +docker images +``` + +> OUTPUT EXAMPLE +```bash +REPOSITORY TAG IMAGE ID CREATED SIZE +oracle/ords-dboper latest fdb17aa242f8 4 hours ago 1.46GB + +``` + +#### Tag and push the image + +Tag and push the image to your image repository. + +NOTE: We have the repo as `phx.ocir.io//oracle/ords:latest`. Please change as per your environment. + +```bash +docker tag oracle/ords-dboper:latest phx.ocir.io//oracle/ords:latest +docker push phx.ocir.io//oracle/ords:latest +``` + +#### In case of private image + +If the image is not public, then you need to create a secret containing the password of your image repository. 
+Create a Kubernetes Secret for your docker repository to pull the image during deployment using the below command: + +```bash +kubectl create secret generic container-registry-secret --from-file=.dockerconfigjson=./.docker/config.json --type=kubernetes.io/dockerconfigjson -n oracle-database-operator-system +``` + +Use the parameter `ordsImagePullSecret` to specify the container secrets in pod creation yaml file + +#### [Image createion example](../usecase01/logfiles/BuildImage.log) + + + diff --git a/docs/multitenant/usecase01/logfiles/BuildImage.log b/docs/multitenant/usecase01/logfiles/BuildImage.log new file mode 100644 index 00000000..f35c66d8 --- /dev/null +++ b/docs/multitenant/usecase01/logfiles/BuildImage.log @@ -0,0 +1,896 @@ +/usr/bin/docker build -t oracle/ords-dboper:latest ../../../ords +Sending build context to Docker daemon 13.82kB +Step 1/12 : FROM container-registry.oracle.com/java/jdk:latest + ---> b8457e2f0b73 +Step 2/12 : ENV ORDS_HOME=/opt/oracle/ords/ RUN_FILE="runOrdsSSL.sh" ORDSVERSION=23.4.0-8 + ---> Using cache + ---> 3317a16cd6f8 +Step 3/12 : COPY $RUN_FILE $ORDS_HOME + ---> 7995edec33cc +Step 4/12 : RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && yum -y install java-11-openjdk-devel && yum -y install iproute && yum clean all + ---> Running in fe168b01f3ad +Oracle Linux 8 BaseOS Latest (x86_64) 91 MB/s | 79 MB 00:00 +Oracle Linux 8 Application Stream (x86_64) 69 MB/s | 62 MB 00:00 +Last metadata expiration check: 0:00:12 ago on Tue 20 Aug 2024 08:54:50 AM UTC. +Package yum-utils-4.0.21-23.0.1.el8.noarch is already installed. +Package tar-2:1.30-9.el8.x86_64 is already installed. +Package vim-minimal-2:8.0.1763-19.0.1.el8_6.4.x86_64 is already installed. +Package procps-ng-3.3.15-14.0.1.el8.x86_64 is already installed. 
+Package curl-7.61.1-33.el8_9.5.x86_64 is already installed. +Dependencies resolved. +================================================================================ + Package Arch Version Repository Size +================================================================================ +Installing: + bind-utils x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 453 k + expect x86_64 5.45.4-5.el8 ol8_baseos_latest 266 k + hostname x86_64 3.20-6.el8 ol8_baseos_latest 32 k + lsof x86_64 4.93.2-1.el8 ol8_baseos_latest 253 k + net-tools x86_64 2.0-0.52.20160912git.el8 ol8_baseos_latest 322 k + openssl x86_64 1:1.1.1k-12.el8_9 ol8_baseos_latest 710 k + sudo x86_64 1.9.5p2-1.el8_9 ol8_baseos_latest 1.0 M + tree x86_64 1.7.0-15.el8 ol8_baseos_latest 59 k + unzip x86_64 6.0-46.0.1.el8 ol8_baseos_latest 196 k + wget x86_64 1.19.5-12.0.1.el8_10 ol8_appstream 733 k + which x86_64 2.21-20.el8 ol8_baseos_latest 50 k + zip x86_64 3.0-23.el8 ol8_baseos_latest 270 k +Upgrading: + curl x86_64 7.61.1-34.el8 ol8_baseos_latest 352 k + dnf-plugins-core noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 76 k + libcurl x86_64 7.61.1-34.el8 ol8_baseos_latest 303 k + python3-dnf-plugins-core + noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 263 k + yum-utils noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 75 k +Installing dependencies: + bind-libs x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 176 k + bind-libs-lite x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 1.2 M + bind-license noarch 32:9.11.36-16.el8_10.2 ol8_appstream 104 k + fstrm x86_64 0.6.1-3.el8 ol8_appstream 29 k + libmaxminddb x86_64 1.2.0-10.el8_9.1 ol8_appstream 32 k + libmetalink x86_64 0.1.3-7.el8 ol8_baseos_latest 32 k + protobuf-c x86_64 1.3.0-8.el8 ol8_appstream 37 k + python3-bind noarch 32:9.11.36-16.el8_10.2 ol8_appstream 151 k + python3-ply noarch 3.9-9.el8 ol8_baseos_latest 111 k + tcl x86_64 1:8.6.8-2.el8 ol8_baseos_latest 1.1 M +Installing weak dependencies: + geolite2-city noarch 20180605-1.el8 ol8_appstream 19 M + geolite2-country noarch 
20180605-1.el8 ol8_appstream 1.0 M + +Transaction Summary +================================================================================ +Install 24 Packages +Upgrade 5 Packages + +Total download size: 28 M +Downloading Packages: +(1/29): hostname-3.20-6.el8.x86_64.rpm 268 kB/s | 32 kB 00:00 +(2/29): libmetalink-0.1.3-7.el8.x86_64.rpm 257 kB/s | 32 kB 00:00 +(3/29): expect-5.45.4-5.el8.x86_64.rpm 1.4 MB/s | 266 kB 00:00 +(4/29): lsof-4.93.2-1.el8.x86_64.rpm 3.2 MB/s | 253 kB 00:00 +(5/29): net-tools-2.0-0.52.20160912git.el8.x86_ 3.6 MB/s | 322 kB 00:00 +(6/29): python3-ply-3.9-9.el8.noarch.rpm 2.7 MB/s | 111 kB 00:00 +(7/29): openssl-1.1.1k-12.el8_9.x86_64.rpm 10 MB/s | 710 kB 00:00 +(8/29): tree-1.7.0-15.el8.x86_64.rpm 2.2 MB/s | 59 kB 00:00 +(9/29): sudo-1.9.5p2-1.el8_9.x86_64.rpm 14 MB/s | 1.0 MB 00:00 +(10/29): unzip-6.0-46.0.1.el8.x86_64.rpm 6.8 MB/s | 196 kB 00:00 +(11/29): which-2.21-20.el8.x86_64.rpm 2.0 MB/s | 50 kB 00:00 +(12/29): tcl-8.6.8-2.el8.x86_64.rpm 13 MB/s | 1.1 MB 00:00 +(13/29): bind-libs-9.11.36-16.el8_10.2.x86_64.r 6.7 MB/s | 176 kB 00:00 +(14/29): zip-3.0-23.el8.x86_64.rpm 8.4 MB/s | 270 kB 00:00 +(15/29): bind-libs-lite-9.11.36-16.el8_10.2.x86 29 MB/s | 1.2 MB 00:00 +(16/29): bind-license-9.11.36-16.el8_10.2.noarc 3.3 MB/s | 104 kB 00:00 +(17/29): bind-utils-9.11.36-16.el8_10.2.x86_64. 
13 MB/s | 453 kB 00:00 +(18/29): fstrm-0.6.1-3.el8.x86_64.rpm 1.2 MB/s | 29 kB 00:00 +(19/29): libmaxminddb-1.2.0-10.el8_9.1.x86_64.r 1.3 MB/s | 32 kB 00:00 +(20/29): geolite2-country-20180605-1.el8.noarch 17 MB/s | 1.0 MB 00:00 +(21/29): protobuf-c-1.3.0-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(22/29): python3-bind-9.11.36-16.el8_10.2.noarc 5.8 MB/s | 151 kB 00:00 +(23/29): wget-1.19.5-12.0.1.el8_10.x86_64.rpm 17 MB/s | 733 kB 00:00 +(24/29): curl-7.61.1-34.el8.x86_64.rpm 12 MB/s | 352 kB 00:00 +(25/29): dnf-plugins-core-4.0.21-25.0.1.el8.noa 2.4 MB/s | 76 kB 00:00 +(26/29): libcurl-7.61.1-34.el8.x86_64.rpm 8.6 MB/s | 303 kB 00:00 +(27/29): python3-dnf-plugins-core-4.0.21-25.0.1 9.8 MB/s | 263 kB 00:00 +(28/29): yum-utils-4.0.21-25.0.1.el8.noarch.rpm 3.0 MB/s | 75 kB 00:00 +(29/29): geolite2-city-20180605-1.el8.noarch.rp 66 MB/s | 19 MB 00:00 +-------------------------------------------------------------------------------- +Total 43 MB/s | 28 MB 00:00 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. 
+Running transaction + Preparing : 1/1 + Running scriptlet: protobuf-c-1.3.0-8.el8.x86_64 1/1 + Installing : protobuf-c-1.3.0-8.el8.x86_64 1/34 + Installing : fstrm-0.6.1-3.el8.x86_64 2/34 + Installing : bind-license-32:9.11.36-16.el8_10.2.noarch 3/34 + Upgrading : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 4/34 + Upgrading : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 5/34 + Upgrading : libcurl-7.61.1-34.el8.x86_64 6/34 + Installing : geolite2-country-20180605-1.el8.noarch 7/34 + Installing : geolite2-city-20180605-1.el8.noarch 8/34 + Installing : libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Running scriptlet: libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Installing : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 10/34 + Installing : bind-libs-32:9.11.36-16.el8_10.2.x86_64 11/34 + Installing : unzip-6.0-46.0.1.el8.x86_64 12/34 + Installing : tcl-1:8.6.8-2.el8.x86_64 13/34 + Running scriptlet: tcl-1:8.6.8-2.el8.x86_64 13/34 + Installing : python3-ply-3.9-9.el8.noarch 14/34 + Installing : python3-bind-32:9.11.36-16.el8_10.2.noarch 15/34 + Installing : libmetalink-0.1.3-7.el8.x86_64 16/34 + Installing : wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Running scriptlet: wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Installing : bind-utils-32:9.11.36-16.el8_10.2.x86_64 18/34 + Installing : expect-5.45.4-5.el8.x86_64 19/34 + Installing : zip-3.0-23.el8.x86_64 20/34 + Upgrading : curl-7.61.1-34.el8.x86_64 21/34 + Upgrading : yum-utils-4.0.21-25.0.1.el8.noarch 22/34 + Installing : which-2.21-20.el8.x86_64 23/34 + Installing : tree-1.7.0-15.el8.x86_64 24/34 + Installing : sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Running scriptlet: sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Installing : openssl-1:1.1.1k-12.el8_9.x86_64 26/34 + Installing : net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Running scriptlet: net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Installing : lsof-4.93.2-1.el8.x86_64 28/34 + Installing : hostname-3.20-6.el8.x86_64 29/34 + Running scriptlet: hostname-3.20-6.el8.x86_64 
29/34 + Cleanup : curl-7.61.1-33.el8_9.5.x86_64 30/34 + Cleanup : yum-utils-4.0.21-23.0.1.el8.noarch 31/34 + Cleanup : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Cleanup : python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 33/34 + Cleanup : libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Running scriptlet: libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Verifying : expect-5.45.4-5.el8.x86_64 1/34 + Verifying : hostname-3.20-6.el8.x86_64 2/34 + Verifying : libmetalink-0.1.3-7.el8.x86_64 3/34 + Verifying : lsof-4.93.2-1.el8.x86_64 4/34 + Verifying : net-tools-2.0-0.52.20160912git.el8.x86_64 5/34 + Verifying : openssl-1:1.1.1k-12.el8_9.x86_64 6/34 + Verifying : python3-ply-3.9-9.el8.noarch 7/34 + Verifying : sudo-1.9.5p2-1.el8_9.x86_64 8/34 + Verifying : tcl-1:8.6.8-2.el8.x86_64 9/34 + Verifying : tree-1.7.0-15.el8.x86_64 10/34 + Verifying : unzip-6.0-46.0.1.el8.x86_64 11/34 + Verifying : which-2.21-20.el8.x86_64 12/34 + Verifying : zip-3.0-23.el8.x86_64 13/34 + Verifying : bind-libs-32:9.11.36-16.el8_10.2.x86_64 14/34 + Verifying : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 15/34 + Verifying : bind-license-32:9.11.36-16.el8_10.2.noarch 16/34 + Verifying : bind-utils-32:9.11.36-16.el8_10.2.x86_64 17/34 + Verifying : fstrm-0.6.1-3.el8.x86_64 18/34 + Verifying : geolite2-city-20180605-1.el8.noarch 19/34 + Verifying : geolite2-country-20180605-1.el8.noarch 20/34 + Verifying : libmaxminddb-1.2.0-10.el8_9.1.x86_64 21/34 + Verifying : protobuf-c-1.3.0-8.el8.x86_64 22/34 + Verifying : python3-bind-32:9.11.36-16.el8_10.2.noarch 23/34 + Verifying : wget-1.19.5-12.0.1.el8_10.x86_64 24/34 + Verifying : curl-7.61.1-34.el8.x86_64 25/34 + Verifying : curl-7.61.1-33.el8_9.5.x86_64 26/34 + Verifying : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 27/34 + Verifying : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 28/34 + Verifying : libcurl-7.61.1-34.el8.x86_64 29/34 + Verifying : libcurl-7.61.1-33.el8_9.5.x86_64 30/34 + Verifying : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 31/34 + Verifying 
: python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Verifying : yum-utils-4.0.21-25.0.1.el8.noarch 33/34 + Verifying : yum-utils-4.0.21-23.0.1.el8.noarch 34/34 + +Upgraded: + curl-7.61.1-34.el8.x86_64 + dnf-plugins-core-4.0.21-25.0.1.el8.noarch + libcurl-7.61.1-34.el8.x86_64 + python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch + yum-utils-4.0.21-25.0.1.el8.noarch +Installed: + bind-libs-32:9.11.36-16.el8_10.2.x86_64 + bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 + bind-license-32:9.11.36-16.el8_10.2.noarch + bind-utils-32:9.11.36-16.el8_10.2.x86_64 + expect-5.45.4-5.el8.x86_64 + fstrm-0.6.1-3.el8.x86_64 + geolite2-city-20180605-1.el8.noarch + geolite2-country-20180605-1.el8.noarch + hostname-3.20-6.el8.x86_64 + libmaxminddb-1.2.0-10.el8_9.1.x86_64 + libmetalink-0.1.3-7.el8.x86_64 + lsof-4.93.2-1.el8.x86_64 + net-tools-2.0-0.52.20160912git.el8.x86_64 + openssl-1:1.1.1k-12.el8_9.x86_64 + protobuf-c-1.3.0-8.el8.x86_64 + python3-bind-32:9.11.36-16.el8_10.2.noarch + python3-ply-3.9-9.el8.noarch + sudo-1.9.5p2-1.el8_9.x86_64 + tcl-1:8.6.8-2.el8.x86_64 + tree-1.7.0-15.el8.x86_64 + unzip-6.0-46.0.1.el8.x86_64 + wget-1.19.5-12.0.1.el8_10.x86_64 + which-2.21-20.el8.x86_64 + zip-3.0-23.el8.x86_64 + +Complete! +Adding repo from: http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 +created by dnf config-manager from http://yum.o 496 kB/s | 139 kB 00:00 +Last metadata expiration check: 0:00:01 ago on Tue 20 Aug 2024 08:55:14 AM UTC. +Dependencies resolved. 
+============================================================================================== + Package Arch Version Repository Size +============================================================================================== +Installing: + java-11-openjdk-devel x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 3.4 M +Installing dependencies: + adwaita-cursor-theme noarch 3.28.0-3.el8 ol8_appstream 647 k + adwaita-icon-theme noarch 3.28.0-3.el8 ol8_appstream 11 M + alsa-lib x86_64 1.2.10-2.el8 ol8_appstream 500 k + at-spi2-atk x86_64 2.26.2-1.el8 ol8_appstream 89 k + at-spi2-core x86_64 2.28.0-1.el8 ol8_appstream 169 k + atk x86_64 2.28.1-1.el8 ol8_appstream 272 k + avahi-libs x86_64 0.7-27.el8 ol8_baseos_latest 61 k + cairo x86_64 1.15.12-6.el8 ol8_appstream 719 k + cairo-gobject x86_64 1.15.12-6.el8 ol8_appstream 33 k + colord-libs x86_64 1.4.2-1.el8 ol8_appstream 236 k + copy-jdk-configs noarch 4.0-2.el8 ol8_appstream 30 k + cpio x86_64 2.12-11.el8 ol8_baseos_latest 266 k + crypto-policies-scripts noarch 20230731-1.git3177e06.el8 ol8_baseos_latest 84 k + cups-libs x86_64 1:2.2.6-60.el8_10 ol8_baseos_latest 435 k + dracut x86_64 049-233.git20240115.0.1.el8 ol8_baseos_latest 382 k + file x86_64 5.33-25.el8 ol8_baseos_latest 77 k + fribidi x86_64 1.0.4-9.el8 ol8_appstream 89 k + gdk-pixbuf2 x86_64 2.36.12-6.el8_10 ol8_baseos_latest 465 k + gdk-pixbuf2-modules x86_64 2.36.12-6.el8_10 ol8_appstream 108 k + gettext x86_64 0.19.8.1-17.el8 ol8_baseos_latest 1.1 M + gettext-libs x86_64 0.19.8.1-17.el8 ol8_baseos_latest 312 k + glib-networking x86_64 2.56.1-1.1.el8 ol8_baseos_latest 155 k + graphite2 x86_64 1.3.10-10.el8 ol8_appstream 122 k + grub2-common noarch 1:2.02-156.0.2.el8 ol8_baseos_latest 897 k + grub2-tools x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 2.0 M + grub2-tools-minimal x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 215 k + gsettings-desktop-schemas x86_64 3.32.0-6.el8 ol8_baseos_latest 633 k + gtk-update-icon-cache x86_64 3.22.30-11.el8 ol8_appstream 32 k 
+ harfbuzz x86_64 1.7.5-4.el8 ol8_appstream 295 k + hicolor-icon-theme noarch 0.17-2.el8 ol8_appstream 48 k + jasper-libs x86_64 2.0.14-5.el8 ol8_appstream 167 k + java-11-openjdk x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 475 k + java-11-openjdk-headless x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 42 M + javapackages-filesystem noarch 5.3.0-1.module+el8+5136+7ff78f74 ol8_appstream 30 k + jbigkit-libs x86_64 2.1-14.el8 ol8_appstream 55 k + json-glib x86_64 1.4.4-1.el8 ol8_baseos_latest 144 k + kbd-legacy noarch 2.0.4-11.el8 ol8_baseos_latest 481 k + kbd-misc noarch 2.0.4-11.el8 ol8_baseos_latest 1.5 M + lcms2 x86_64 2.9-2.el8 ol8_appstream 164 k + libX11 x86_64 1.6.8-8.el8 ol8_appstream 611 k + libX11-common noarch 1.6.8-8.el8 ol8_appstream 157 k + libXau x86_64 1.0.9-3.el8 ol8_appstream 37 k + libXcomposite x86_64 0.4.4-14.el8 ol8_appstream 28 k + libXcursor x86_64 1.1.15-3.el8 ol8_appstream 36 k + libXdamage x86_64 1.1.4-14.el8 ol8_appstream 27 k + libXext x86_64 1.3.4-1.el8 ol8_appstream 45 k + libXfixes x86_64 5.0.3-7.el8 ol8_appstream 25 k + libXft x86_64 2.3.3-1.el8 ol8_appstream 67 k + libXi x86_64 1.7.10-1.el8 ol8_appstream 49 k + libXinerama x86_64 1.1.4-1.el8 ol8_appstream 15 k + libXrandr x86_64 1.5.2-1.el8 ol8_appstream 34 k + libXrender x86_64 0.9.10-7.el8 ol8_appstream 33 k + libXtst x86_64 1.2.3-7.el8 ol8_appstream 22 k + libcroco x86_64 0.6.12-4.el8_2.1 ol8_baseos_latest 113 k + libdatrie x86_64 0.2.9-7.el8 ol8_appstream 33 k + libepoxy x86_64 1.5.8-1.el8 ol8_appstream 225 k + libfontenc x86_64 1.1.3-8.el8 ol8_appstream 37 k + libgomp x86_64 8.5.0-22.0.1.el8_10 ol8_baseos_latest 218 k + libgusb x86_64 0.3.0-1.el8 ol8_baseos_latest 49 k + libjpeg-turbo x86_64 1.5.3-12.el8 ol8_appstream 157 k + libkcapi x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 52 k + libkcapi-hmaccalc x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 31 k + libmodman x86_64 2.0.1-17.el8 ol8_baseos_latest 36 k + libpkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 35 k + libproxy x86_64 
0.4.15-5.2.el8 ol8_baseos_latest 75 k + libsoup x86_64 2.62.3-5.el8 ol8_baseos_latest 424 k + libthai x86_64 0.1.27-2.el8 ol8_appstream 203 k + libtiff x86_64 4.0.9-32.el8_10 ol8_appstream 189 k + libwayland-client x86_64 1.21.0-1.el8 ol8_appstream 41 k + libwayland-cursor x86_64 1.21.0-1.el8 ol8_appstream 26 k + libwayland-egl x86_64 1.21.0-1.el8 ol8_appstream 19 k + libxcb x86_64 1.13.1-1.el8 ol8_appstream 231 k + libxkbcommon x86_64 0.9.1-1.el8 ol8_appstream 116 k + lksctp-tools x86_64 1.0.18-3.el8 ol8_baseos_latest 100 k + lua x86_64 5.3.4-12.el8 ol8_appstream 192 k + nspr x86_64 4.35.0-1.el8_8 ol8_appstream 143 k + nss x86_64 3.90.0-7.el8_10 ol8_appstream 750 k + nss-softokn x86_64 3.90.0-7.el8_10 ol8_appstream 1.2 M + nss-softokn-freebl x86_64 3.90.0-7.el8_10 ol8_appstream 375 k + nss-sysinit x86_64 3.90.0-7.el8_10 ol8_appstream 74 k + nss-util x86_64 3.90.0-7.el8_10 ol8_appstream 139 k + os-prober x86_64 1.74-9.0.1.el8 ol8_baseos_latest 51 k + pango x86_64 1.42.4-8.el8 ol8_appstream 297 k + pixman x86_64 0.38.4-4.el8 ol8_appstream 256 k + pkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 38 k + pkgconf-m4 noarch 1.4.2-1.el8 ol8_baseos_latest 17 k + pkgconf-pkg-config x86_64 1.4.2-1.el8 ol8_baseos_latest 15 k + rest x86_64 0.8.1-2.el8 ol8_appstream 70 k + shared-mime-info x86_64 1.9-4.el8 ol8_baseos_latest 328 k + systemd-udev x86_64 239-78.0.4.el8 ol8_baseos_latest 1.6 M + ttmkfdir x86_64 3.0.9-54.el8 ol8_appstream 62 k + tzdata-java noarch 2024a-1.0.1.el8 ol8_appstream 186 k + xkeyboard-config noarch 2.28-1.el8 ol8_appstream 782 k + xorg-x11-font-utils x86_64 1:7.5-41.el8 ol8_appstream 104 k + xorg-x11-fonts-Type1 noarch 7.5-19.el8 ol8_appstream 522 k + xz x86_64 5.2.4-4.el8_6 ol8_baseos_latest 153 k +Installing weak dependencies: + abattis-cantarell-fonts noarch 0.0.25-6.el8 ol8_appstream 155 k + dconf x86_64 0.28.0-4.0.1.el8 ol8_appstream 108 k + dejavu-sans-mono-fonts noarch 2.35-7.el8 ol8_baseos_latest 447 k + grubby x86_64 8.40-49.0.2.el8 ol8_baseos_latest 
50 k + gtk3 x86_64 3.22.30-11.el8 ol8_appstream 4.5 M + hardlink x86_64 1:1.3-6.el8 ol8_baseos_latest 29 k + kbd x86_64 2.0.4-11.el8 ol8_baseos_latest 390 k + memstrack x86_64 0.2.5-2.el8 ol8_baseos_latest 51 k + pigz x86_64 2.4-4.el8 ol8_baseos_latest 80 k +Enabling module streams: + javapackages-runtime 201801 + +Transaction Summary +============================================================================================== +Install 106 Packages + +Total download size: 86 M +Installed size: 312 M +Downloading Packages: +(1/106): crypto-policies-scripts-20230731-1.git 862 kB/s | 84 kB 00:00 +(2/106): avahi-libs-0.7-27.el8.x86_64.rpm 602 kB/s | 61 kB 00:00 +(3/106): cpio-2.12-11.el8.x86_64.rpm 1.8 MB/s | 266 kB 00:00 +(4/106): cups-libs-2.2.6-60.el8_10.x86_64.rpm 5.7 MB/s | 435 kB 00:00 +(5/106): dejavu-sans-mono-fonts-2.35-7.el8.noar 5.1 MB/s | 447 kB 00:00 +(6/106): dracut-049-233.git20240115.0.1.el8.x86 7.0 MB/s | 382 kB 00:00 +(7/106): gdk-pixbuf2-2.36.12-6.el8_10.x86_64.rp 12 MB/s | 465 kB 00:00 +(8/106): gettext-libs-0.19.8.1-17.el8.x86_64.rp 9.3 MB/s | 312 kB 00:00 +(9/106): gettext-0.19.8.1-17.el8.x86_64.rpm 16 MB/s | 1.1 MB 00:00 +(10/106): glib-networking-2.56.1-1.1.el8.x86_64 6.0 MB/s | 155 kB 00:00 +(11/106): grub2-common-2.02-156.0.2.el8.noarch. 26 MB/s | 897 kB 00:00 +(12/106): grub2-tools-minimal-2.02-156.0.2.el8. 
8.2 MB/s | 215 kB 00:00 +(13/106): grubby-8.40-49.0.2.el8.x86_64.rpm 2.1 MB/s | 50 kB 00:00 +(14/106): grub2-tools-2.02-156.0.2.el8.x86_64.r 26 MB/s | 2.0 MB 00:00 +(15/106): gsettings-desktop-schemas-3.32.0-6.el 19 MB/s | 633 kB 00:00 +(16/106): hardlink-1.3-6.el8.x86_64.rpm 1.1 MB/s | 29 kB 00:00 +(17/106): json-glib-1.4.4-1.el8.x86_64.rpm 5.9 MB/s | 144 kB 00:00 +(18/106): kbd-2.0.4-11.el8.x86_64.rpm 14 MB/s | 390 kB 00:00 +(19/106): kbd-legacy-2.0.4-11.el8.noarch.rpm 17 MB/s | 481 kB 00:00 +(20/106): kbd-misc-2.0.4-11.el8.noarch.rpm 41 MB/s | 1.5 MB 00:00 +(21/106): libcroco-0.6.12-4.el8_2.1.x86_64.rpm 4.7 MB/s | 113 kB 00:00 +(22/106): libgomp-8.5.0-22.0.1.el8_10.x86_64.rp 9.1 MB/s | 218 kB 00:00 +(23/106): libgusb-0.3.0-1.el8.x86_64.rpm 2.1 MB/s | 49 kB 00:00 +(24/106): libkcapi-1.4.0-2.0.1.el8.x86_64.rpm 1.6 MB/s | 52 kB 00:00 +(25/106): libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86 822 kB/s | 31 kB 00:00 +(26/106): libmodman-2.0.1-17.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(27/106): libpkgconf-1.4.2-1.el8.x86_64.rpm 1.2 MB/s | 35 kB 00:00 +(28/106): libproxy-0.4.15-5.2.el8.x86_64.rpm 3.0 MB/s | 75 kB 00:00 +(29/106): libsoup-2.62.3-5.el8.x86_64.rpm 15 MB/s | 424 kB 00:00 +(30/106): lksctp-tools-1.0.18-3.el8.x86_64.rpm 3.5 MB/s | 100 kB 00:00 +(31/106): memstrack-0.2.5-2.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(32/106): os-prober-1.74-9.0.1.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(33/106): pigz-2.4-4.el8.x86_64.rpm 3.5 MB/s | 80 kB 00:00 +(34/106): pkgconf-1.4.2-1.el8.x86_64.rpm 1.7 MB/s | 38 kB 00:00 +(35/106): pkgconf-m4-1.4.2-1.el8.noarch.rpm 761 kB/s | 17 kB 00:00 +(36/106): pkgconf-pkg-config-1.4.2-1.el8.x86_64 691 kB/s | 15 kB 00:00 +(37/106): shared-mime-info-1.9-4.el8.x86_64.rpm 13 MB/s | 328 kB 00:00 +(38/106): systemd-udev-239-78.0.4.el8.x86_64.rp 32 MB/s | 1.6 MB 00:00 +(39/106): xz-5.2.4-4.el8_6.x86_64.rpm 5.2 MB/s | 153 kB 00:00 +(40/106): abattis-cantarell-fonts-0.0.25-6.el8. 
6.4 MB/s | 155 kB 00:00 +(41/106): adwaita-cursor-theme-3.28.0-3.el8.noa 22 MB/s | 647 kB 00:00 +(42/106): alsa-lib-1.2.10-2.el8.x86_64.rpm 18 MB/s | 500 kB 00:00 +(43/106): at-spi2-atk-2.26.2-1.el8.x86_64.rpm 3.8 MB/s | 89 kB 00:00 +(44/106): at-spi2-core-2.28.0-1.el8.x86_64.rpm 6.9 MB/s | 169 kB 00:00 +(45/106): atk-2.28.1-1.el8.x86_64.rpm 9.2 MB/s | 272 kB 00:00 +(46/106): cairo-1.15.12-6.el8.x86_64.rpm 24 MB/s | 719 kB 00:00 +(47/106): adwaita-icon-theme-3.28.0-3.el8.noarc 65 MB/s | 11 MB 00:00 +(48/106): cairo-gobject-1.15.12-6.el8.x86_64.rp 914 kB/s | 33 kB 00:00 +(49/106): colord-libs-1.4.2-1.el8.x86_64.rpm 9.5 MB/s | 236 kB 00:00 +(50/106): copy-jdk-configs-4.0-2.el8.noarch.rpm 1.1 MB/s | 30 kB 00:00 +(51/106): dconf-0.28.0-4.0.1.el8.x86_64.rpm 4.4 MB/s | 108 kB 00:00 +(52/106): fribidi-1.0.4-9.el8.x86_64.rpm 3.9 MB/s | 89 kB 00:00 +(53/106): graphite2-1.3.10-10.el8.x86_64.rpm 5.1 MB/s | 122 kB 00:00 +(54/106): gdk-pixbuf2-modules-2.36.12-6.el8_10. 3.6 MB/s | 108 kB 00:00 +(55/106): gtk-update-icon-cache-3.22.30-11.el8. 1.4 MB/s | 32 kB 00:00 +(56/106): harfbuzz-1.7.5-4.el8.x86_64.rpm 11 MB/s | 295 kB 00:00 +(57/106): gtk3-3.22.30-11.el8.x86_64.rpm 68 MB/s | 4.5 MB 00:00 +(58/106): hicolor-icon-theme-0.17-2.el8.noarch. 
2.1 MB/s | 48 kB 00:00 +(59/106): java-11-openjdk-11.0.24.0.8-3.0.1.el8 17 MB/s | 475 kB 00:00 +(60/106): jasper-libs-2.0.14-5.el8.x86_64.rpm 5.0 MB/s | 167 kB 00:00 +(61/106): java-11-openjdk-devel-11.0.24.0.8-3.0 61 MB/s | 3.4 MB 00:00 +(62/106): javapackages-filesystem-5.3.0-1.modul 1.2 MB/s | 30 kB 00:00 +(63/106): jbigkit-libs-2.1-14.el8.x86_64.rpm 2.1 MB/s | 55 kB 00:00 +(64/106): lcms2-2.9-2.el8.x86_64.rpm 3.8 MB/s | 164 kB 00:00 +(65/106): libX11-1.6.8-8.el8.x86_64.rpm 20 MB/s | 611 kB 00:00 +(66/106): libX11-common-1.6.8-8.el8.noarch.rpm 6.8 MB/s | 157 kB 00:00 +(67/106): libXau-1.0.9-3.el8.x86_64.rpm 1.6 MB/s | 37 kB 00:00 +(68/106): libXcomposite-0.4.4-14.el8.x86_64.rpm 1.3 MB/s | 28 kB 00:00 +(69/106): libXcursor-1.1.15-3.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(70/106): libXdamage-1.1.4-14.el8.x86_64.rpm 1.2 MB/s | 27 kB 00:00 +(71/106): libXext-1.3.4-1.el8.x86_64.rpm 2.0 MB/s | 45 kB 00:00 +(72/106): libXfixes-5.0.3-7.el8.x86_64.rpm 1.1 MB/s | 25 kB 00:00 +(73/106): libXft-2.3.3-1.el8.x86_64.rpm 2.9 MB/s | 67 kB 00:00 +(74/106): libXi-1.7.10-1.el8.x86_64.rpm 2.2 MB/s | 49 kB 00:00 +(75/106): libXinerama-1.1.4-1.el8.x86_64.rpm 717 kB/s | 15 kB 00:00 +(76/106): libXrandr-1.5.2-1.el8.x86_64.rpm 1.5 MB/s | 34 kB 00:00 +(77/106): libXrender-0.9.10-7.el8.x86_64.rpm 1.4 MB/s | 33 kB 00:00 +(78/106): libXtst-1.2.3-7.el8.x86_64.rpm 957 kB/s | 22 kB 00:00 +(79/106): java-11-openjdk-headless-11.0.24.0.8- 71 MB/s | 42 MB 00:00 +(80/106): libdatrie-0.2.9-7.el8.x86_64.rpm 274 kB/s | 33 kB 00:00 +(81/106): libepoxy-1.5.8-1.el8.x86_64.rpm 9.1 MB/s | 225 kB 00:00 +(82/106): libfontenc-1.1.3-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(83/106): libthai-0.1.27-2.el8.x86_64.rpm 8.2 MB/s | 203 kB 00:00 +(84/106): libjpeg-turbo-1.5.3-12.el8.x86_64.rpm 5.1 MB/s | 157 kB 00:00 +(85/106): libtiff-4.0.9-32.el8_10.x86_64.rpm 7.8 MB/s | 189 kB 00:00 +(86/106): libwayland-client-1.21.0-1.el8.x86_64 1.7 MB/s | 41 kB 00:00 +(87/106): libwayland-cursor-1.21.0-1.el8.x86_64 1.2 MB/s | 26 
kB 00:00 +(88/106): libwayland-egl-1.21.0-1.el8.x86_64.rp 801 kB/s | 19 kB 00:00 +(89/106): libxcb-1.13.1-1.el8.x86_64.rpm 9.7 MB/s | 231 kB 00:00 +(90/106): libxkbcommon-0.9.1-1.el8.x86_64.rpm 5.0 MB/s | 116 kB 00:00 +(91/106): nspr-4.35.0-1.el8_8.x86_64.rpm 6.0 MB/s | 143 kB 00:00 +(92/106): lua-5.3.4-12.el8.x86_64.rpm 5.9 MB/s | 192 kB 00:00 +(93/106): nss-softokn-3.90.0-7.el8_10.x86_64.rp 38 MB/s | 1.2 MB 00:00 +(94/106): nss-3.90.0-7.el8_10.x86_64.rpm 17 MB/s | 750 kB 00:00 +(95/106): nss-softokn-freebl-3.90.0-7.el8_10.x8 14 MB/s | 375 kB 00:00 +(96/106): nss-sysinit-3.90.0-7.el8_10.x86_64.rp 3.2 MB/s | 74 kB 00:00 +(97/106): nss-util-3.90.0-7.el8_10.x86_64.rpm 5.8 MB/s | 139 kB 00:00 +(98/106): pango-1.42.4-8.el8.x86_64.rpm 11 MB/s | 297 kB 00:00 +(99/106): pixman-0.38.4-4.el8.x86_64.rpm 10 MB/s | 256 kB 00:00 +(100/106): rest-0.8.1-2.el8.x86_64.rpm 3.1 MB/s | 70 kB 00:00 +(101/106): ttmkfdir-3.0.9-54.el8.x86_64.rpm 2.5 MB/s | 62 kB 00:00 +(102/106): tzdata-java-2024a-1.0.1.el8.noarch.r 7.4 MB/s | 186 kB 00:00 +(103/106): xkeyboard-config-2.28-1.el8.noarch.r 27 MB/s | 782 kB 00:00 +(104/106): xorg-x11-font-utils-7.5-41.el8.x86_6 3.9 MB/s | 104 kB 00:00 +(105/106): xorg-x11-fonts-Type1-7.5-19.el8.noar 1.3 MB/s | 522 kB 00:00 +(106/106): file-5.33-25.el8.x86_64.rpm 26 kB/s | 77 kB 00:02 +-------------------------------------------------------------------------------- +Total 27 MB/s | 86 MB 00:03 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. 
+Running transaction + Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 1/1 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86 1/1 + Preparing : 1/1 + Installing : nspr-4.35.0-1.el8_8.x86_64 1/106 + Running scriptlet: nspr-4.35.0-1.el8_8.x86_64 1/106 + Installing : nss-util-3.90.0-7.el8_10.x86_64 2/106 + Installing : libjpeg-turbo-1.5.3-12.el8.x86_64 3/106 + Installing : pixman-0.38.4-4.el8.x86_64 4/106 + Installing : libwayland-client-1.21.0-1.el8.x86_64 5/106 + Installing : atk-2.28.1-1.el8.x86_64 6/106 + Installing : libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Running scriptlet: libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Installing : libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Running scriptlet: libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Installing : grub2-common-1:2.02-156.0.2.el8.noarch 9/106 + Installing : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Installing : gettext-0.19.8.1-17.el8.x86_64 11/106 + Running scriptlet: gettext-0.19.8.1-17.el8.x86_64 11/106 + Installing : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 12/106 + Installing : libwayland-cursor-1.21.0-1.el8.x86_64 13/106 + Installing : jasper-libs-2.0.14-5.el8.x86_64 14/106 + Installing : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 15/106 + Installing : nss-softokn-3.90.0-7.el8_10.x86_64 16/106 + Installing : xkeyboard-config-2.28-1.el8.noarch 17/106 + Installing : libxkbcommon-0.9.1-1.el8.x86_64 18/106 + Installing : tzdata-java-2024a-1.0.1.el8.noarch 19/106 + Installing : ttmkfdir-3.0.9-54.el8.x86_64 20/106 + Installing : lua-5.3.4-12.el8.x86_64 21/106 + Installing : copy-jdk-configs-4.0-2.el8.noarch 22/106 + Installing : libwayland-egl-1.21.0-1.el8.x86_64 23/106 + Installing : libfontenc-1.1.3-8.el8.x86_64 24/106 + Installing : libepoxy-1.5.8-1.el8.x86_64 25/106 + Installing : libdatrie-0.2.9-7.el8.x86_64 26/106 + Running scriptlet: libdatrie-0.2.9-7.el8.x86_64 26/106 + Installing : libthai-0.1.27-2.el8.x86_64 27/106 + Running scriptlet: libthai-0.1.27-2.el8.x86_64 27/106 + 
Installing : libXau-1.0.9-3.el8.x86_64 28/106 + Installing : libxcb-1.13.1-1.el8.x86_64 29/106 + Installing : libX11-common-1.6.8-8.el8.noarch 30/106 + Installing : libX11-1.6.8-8.el8.x86_64 31/106 + Installing : libXext-1.3.4-1.el8.x86_64 32/106 + Installing : libXrender-0.9.10-7.el8.x86_64 33/106 + Installing : cairo-1.15.12-6.el8.x86_64 34/106 + Installing : libXi-1.7.10-1.el8.x86_64 35/106 + Installing : libXfixes-5.0.3-7.el8.x86_64 36/106 + Installing : libXtst-1.2.3-7.el8.x86_64 37/106 + Installing : libXcomposite-0.4.4-14.el8.x86_64 38/106 + Installing : at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Running scriptlet: at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Installing : at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Running scriptlet: at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Installing : libXcursor-1.1.15-3.el8.x86_64 41/106 + Installing : libXdamage-1.1.4-14.el8.x86_64 42/106 + Installing : cairo-gobject-1.15.12-6.el8.x86_64 43/106 + Installing : libXft-2.3.3-1.el8.x86_64 44/106 + Installing : libXrandr-1.5.2-1.el8.x86_64 45/106 + Installing : libXinerama-1.1.4-1.el8.x86_64 46/106 + Installing : lcms2-2.9-2.el8.x86_64 47/106 + Running scriptlet: lcms2-2.9-2.el8.x86_64 47/106 + Installing : jbigkit-libs-2.1-14.el8.x86_64 48/106 + Running scriptlet: jbigkit-libs-2.1-14.el8.x86_64 48/106 + Installing : libtiff-4.0.9-32.el8_10.x86_64 49/106 + Installing : javapackages-filesystem-5.3.0-1.module+el8+5136+ 50/106 + Installing : hicolor-icon-theme-0.17-2.el8.noarch 51/106 + Installing : graphite2-1.3.10-10.el8.x86_64 52/106 + Installing : harfbuzz-1.7.5-4.el8.x86_64 53/106 + Running scriptlet: harfbuzz-1.7.5-4.el8.x86_64 53/106 + Installing : fribidi-1.0.4-9.el8.x86_64 54/106 + Installing : pango-1.42.4-8.el8.x86_64 55/106 + Running scriptlet: pango-1.42.4-8.el8.x86_64 55/106 + Installing : dconf-0.28.0-4.0.1.el8.x86_64 56/106 + Installing : alsa-lib-1.2.10-2.el8.x86_64 57/106 + Running scriptlet: alsa-lib-1.2.10-2.el8.x86_64 57/106 + Installing : 
adwaita-cursor-theme-3.28.0-3.el8.noarch 58/106 + Installing : adwaita-icon-theme-3.28.0-3.el8.noarch 59/106 + Installing : abattis-cantarell-fonts-0.0.25-6.el8.noarch 60/106 + Installing : xz-5.2.4-4.el8_6.x86_64 61/106 + Installing : shared-mime-info-1.9-4.el8.x86_64 62/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 62/106 + Installing : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Installing : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 64/106 + Installing : gtk-update-icon-cache-3.22.30-11.el8.x86_64 65/106 + Installing : pkgconf-m4-1.4.2-1.el8.noarch 66/106 + Installing : pigz-2.4-4.el8.x86_64 67/106 + Installing : memstrack-0.2.5-2.el8.x86_64 68/106 + Installing : lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Running scriptlet: lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Installing : libpkgconf-1.4.2-1.el8.x86_64 70/106 + Installing : pkgconf-1.4.2-1.el8.x86_64 71/106 + Installing : pkgconf-pkg-config-1.4.2-1.el8.x86_64 72/106 + Installing : xorg-x11-font-utils-1:7.5-41.el8.x86_64 73/106 + Installing : xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Running scriptlet: xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Installing : libmodman-2.0.1-17.el8.x86_64 75/106 + Running scriptlet: libmodman-2.0.1-17.el8.x86_64 75/106 + Installing : libproxy-0.4.15-5.2.el8.x86_64 76/106 + Running scriptlet: libproxy-0.4.15-5.2.el8.x86_64 76/106 + Installing : libkcapi-1.4.0-2.0.1.el8.x86_64 77/106 + Installing : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 78/106 + Installing : libgusb-0.3.0-1.el8.x86_64 79/106 + Installing : colord-libs-1.4.2-1.el8.x86_64 80/106 + Installing : kbd-misc-2.0.4-11.el8.noarch 81/106 + Installing : kbd-legacy-2.0.4-11.el8.noarch 82/106 + Installing : kbd-2.0.4-11.el8.x86_64 83/106 + Installing : systemd-udev-239-78.0.4.el8.x86_64 84/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 84/106 + Installing : os-prober-1.74-9.0.1.el8.x86_64 85/106 + Installing : 
json-glib-1.4.4-1.el8.x86_64 86/106 + Installing : hardlink-1:1.3-6.el8.x86_64 87/106 + Installing : file-5.33-25.el8.x86_64 88/106 + Installing : dejavu-sans-mono-fonts-2.35-7.el8.noarch 89/106 + Installing : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 90/106 + Installing : glib-networking-2.56.1-1.1.el8.x86_64 91/106 + Installing : libsoup-2.62.3-5.el8.x86_64 92/106 + Installing : rest-0.8.1-2.el8.x86_64 93/106 + Running scriptlet: rest-0.8.1-2.el8.x86_64 93/106 + Installing : cpio-2.12-11.el8.x86_64 94/106 + Installing : dracut-049-233.git20240115.0.1.el8.x86_64 95/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grubby-8.40-49.0.2.el8.x86_64 97/106 + Installing : crypto-policies-scripts-20230731-1.git3177e06.el 98/106 + Installing : nss-sysinit-3.90.0-7.el8_10.x86_64 99/106 + Installing : nss-3.90.0-7.el8_10.x86_64 100/106 + Installing : avahi-libs-0.7-27.el8.x86_64 101/106 + Installing : cups-libs-1:2.2.6-60.el8_10.x86_64 102/106 + Installing : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Installing : gtk3-3.22.30-11.el8.x86_64 104/106 + Installing : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Running scriptlet: java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Installing : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 106/106 + Running scriptlet: dconf-0.28.0-4.0.1.el8.x86_64 106/106 + Running scriptlet: crypto-policies-scripts-20230731-1.git3177e06.el 106/106 + Running scriptlet: nss-3.90.0-7.el8_10.x86_64 106/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 106/106 + Running scriptlet: 
java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: hicolor-icon-theme-0.17-2.el8.noarch 106/106 + Running scriptlet: adwaita-icon-theme-3.28.0-3.el8.noarch 106/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 106/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 106/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 106/106 + Verifying : avahi-libs-0.7-27.el8.x86_64 1/106 + Verifying : cpio-2.12-11.el8.x86_64 2/106 + Verifying : crypto-policies-scripts-20230731-1.git3177e06.el 3/106 + Verifying : cups-libs-1:2.2.6-60.el8_10.x86_64 4/106 + Verifying : dejavu-sans-mono-fonts-2.35-7.el8.noarch 5/106 + Verifying : dracut-049-233.git20240115.0.1.el8.x86_64 6/106 + Verifying : file-5.33-25.el8.x86_64 7/106 + Verifying : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 8/106 + Verifying : gettext-0.19.8.1-17.el8.x86_64 9/106 + Verifying : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Verifying : glib-networking-2.56.1-1.1.el8.x86_64 11/106 + Verifying : grub2-common-1:2.02-156.0.2.el8.noarch 12/106 + Verifying : grub2-tools-1:2.02-156.0.2.el8.x86_64 13/106 + Verifying : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 14/106 + Verifying : grubby-8.40-49.0.2.el8.x86_64 15/106 + Verifying : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 16/106 + Verifying : hardlink-1:1.3-6.el8.x86_64 17/106 + Verifying : json-glib-1.4.4-1.el8.x86_64 18/106 + Verifying : kbd-2.0.4-11.el8.x86_64 19/106 + Verifying : kbd-legacy-2.0.4-11.el8.noarch 20/106 + Verifying : kbd-misc-2.0.4-11.el8.noarch 21/106 + Verifying : libcroco-0.6.12-4.el8_2.1.x86_64 22/106 + Verifying : libgomp-8.5.0-22.0.1.el8_10.x86_64 23/106 + Verifying : libgusb-0.3.0-1.el8.x86_64 24/106 + Verifying : libkcapi-1.4.0-2.0.1.el8.x86_64 25/106 + Verifying : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 26/106 + Verifying : libmodman-2.0.1-17.el8.x86_64 27/106 + Verifying : libpkgconf-1.4.2-1.el8.x86_64 28/106 + 
Verifying : libproxy-0.4.15-5.2.el8.x86_64 29/106 + Verifying : libsoup-2.62.3-5.el8.x86_64 30/106 + Verifying : lksctp-tools-1.0.18-3.el8.x86_64 31/106 + Verifying : memstrack-0.2.5-2.el8.x86_64 32/106 + Verifying : os-prober-1.74-9.0.1.el8.x86_64 33/106 + Verifying : pigz-2.4-4.el8.x86_64 34/106 + Verifying : pkgconf-1.4.2-1.el8.x86_64 35/106 + Verifying : pkgconf-m4-1.4.2-1.el8.noarch 36/106 + Verifying : pkgconf-pkg-config-1.4.2-1.el8.x86_64 37/106 + Verifying : shared-mime-info-1.9-4.el8.x86_64 38/106 + Verifying : systemd-udev-239-78.0.4.el8.x86_64 39/106 + Verifying : xz-5.2.4-4.el8_6.x86_64 40/106 + Verifying : abattis-cantarell-fonts-0.0.25-6.el8.noarch 41/106 + Verifying : adwaita-cursor-theme-3.28.0-3.el8.noarch 42/106 + Verifying : adwaita-icon-theme-3.28.0-3.el8.noarch 43/106 + Verifying : alsa-lib-1.2.10-2.el8.x86_64 44/106 + Verifying : at-spi2-atk-2.26.2-1.el8.x86_64 45/106 + Verifying : at-spi2-core-2.28.0-1.el8.x86_64 46/106 + Verifying : atk-2.28.1-1.el8.x86_64 47/106 + Verifying : cairo-1.15.12-6.el8.x86_64 48/106 + Verifying : cairo-gobject-1.15.12-6.el8.x86_64 49/106 + Verifying : colord-libs-1.4.2-1.el8.x86_64 50/106 + Verifying : copy-jdk-configs-4.0-2.el8.noarch 51/106 + Verifying : dconf-0.28.0-4.0.1.el8.x86_64 52/106 + Verifying : fribidi-1.0.4-9.el8.x86_64 53/106 + Verifying : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 54/106 + Verifying : graphite2-1.3.10-10.el8.x86_64 55/106 + Verifying : gtk-update-icon-cache-3.22.30-11.el8.x86_64 56/106 + Verifying : gtk3-3.22.30-11.el8.x86_64 57/106 + Verifying : harfbuzz-1.7.5-4.el8.x86_64 58/106 + Verifying : hicolor-icon-theme-0.17-2.el8.noarch 59/106 + Verifying : jasper-libs-2.0.14-5.el8.x86_64 60/106 + Verifying : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 61/106 + Verifying : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 62/106 + Verifying : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 63/106 + Verifying : javapackages-filesystem-5.3.0-1.module+el8+5136+ 64/106 + Verifying : 
jbigkit-libs-2.1-14.el8.x86_64 65/106 + Verifying : lcms2-2.9-2.el8.x86_64 66/106 + Verifying : libX11-1.6.8-8.el8.x86_64 67/106 + Verifying : libX11-common-1.6.8-8.el8.noarch 68/106 + Verifying : libXau-1.0.9-3.el8.x86_64 69/106 + Verifying : libXcomposite-0.4.4-14.el8.x86_64 70/106 + Verifying : libXcursor-1.1.15-3.el8.x86_64 71/106 + Verifying : libXdamage-1.1.4-14.el8.x86_64 72/106 + Verifying : libXext-1.3.4-1.el8.x86_64 73/106 + Verifying : libXfixes-5.0.3-7.el8.x86_64 74/106 + Verifying : libXft-2.3.3-1.el8.x86_64 75/106 + Verifying : libXi-1.7.10-1.el8.x86_64 76/106 + Verifying : libXinerama-1.1.4-1.el8.x86_64 77/106 + Verifying : libXrandr-1.5.2-1.el8.x86_64 78/106 + Verifying : libXrender-0.9.10-7.el8.x86_64 79/106 + Verifying : libXtst-1.2.3-7.el8.x86_64 80/106 + Verifying : libdatrie-0.2.9-7.el8.x86_64 81/106 + Verifying : libepoxy-1.5.8-1.el8.x86_64 82/106 + Verifying : libfontenc-1.1.3-8.el8.x86_64 83/106 + Verifying : libjpeg-turbo-1.5.3-12.el8.x86_64 84/106 + Verifying : libthai-0.1.27-2.el8.x86_64 85/106 + Verifying : libtiff-4.0.9-32.el8_10.x86_64 86/106 + Verifying : libwayland-client-1.21.0-1.el8.x86_64 87/106 + Verifying : libwayland-cursor-1.21.0-1.el8.x86_64 88/106 + Verifying : libwayland-egl-1.21.0-1.el8.x86_64 89/106 + Verifying : libxcb-1.13.1-1.el8.x86_64 90/106 + Verifying : libxkbcommon-0.9.1-1.el8.x86_64 91/106 + Verifying : lua-5.3.4-12.el8.x86_64 92/106 + Verifying : nspr-4.35.0-1.el8_8.x86_64 93/106 + Verifying : nss-3.90.0-7.el8_10.x86_64 94/106 + Verifying : nss-softokn-3.90.0-7.el8_10.x86_64 95/106 + Verifying : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 96/106 + Verifying : nss-sysinit-3.90.0-7.el8_10.x86_64 97/106 + Verifying : nss-util-3.90.0-7.el8_10.x86_64 98/106 + Verifying : pango-1.42.4-8.el8.x86_64 99/106 + Verifying : pixman-0.38.4-4.el8.x86_64 100/106 + Verifying : rest-0.8.1-2.el8.x86_64 101/106 + Verifying : ttmkfdir-3.0.9-54.el8.x86_64 102/106 + Verifying : tzdata-java-2024a-1.0.1.el8.noarch 103/106 + Verifying : 
xkeyboard-config-2.28-1.el8.noarch 104/106 + Verifying : xorg-x11-font-utils-1:7.5-41.el8.x86_64 105/106 + Verifying : xorg-x11-fonts-Type1-7.5-19.el8.noarch 106/106 + +Installed: + abattis-cantarell-fonts-0.0.25-6.el8.noarch + adwaita-cursor-theme-3.28.0-3.el8.noarch + adwaita-icon-theme-3.28.0-3.el8.noarch + alsa-lib-1.2.10-2.el8.x86_64 + at-spi2-atk-2.26.2-1.el8.x86_64 + at-spi2-core-2.28.0-1.el8.x86_64 + atk-2.28.1-1.el8.x86_64 + avahi-libs-0.7-27.el8.x86_64 + cairo-1.15.12-6.el8.x86_64 + cairo-gobject-1.15.12-6.el8.x86_64 + colord-libs-1.4.2-1.el8.x86_64 + copy-jdk-configs-4.0-2.el8.noarch + cpio-2.12-11.el8.x86_64 + crypto-policies-scripts-20230731-1.git3177e06.el8.noarch + cups-libs-1:2.2.6-60.el8_10.x86_64 + dconf-0.28.0-4.0.1.el8.x86_64 + dejavu-sans-mono-fonts-2.35-7.el8.noarch + dracut-049-233.git20240115.0.1.el8.x86_64 + file-5.33-25.el8.x86_64 + fribidi-1.0.4-9.el8.x86_64 + gdk-pixbuf2-2.36.12-6.el8_10.x86_64 + gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 + gettext-0.19.8.1-17.el8.x86_64 + gettext-libs-0.19.8.1-17.el8.x86_64 + glib-networking-2.56.1-1.1.el8.x86_64 + graphite2-1.3.10-10.el8.x86_64 + grub2-common-1:2.02-156.0.2.el8.noarch + grub2-tools-1:2.02-156.0.2.el8.x86_64 + grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 + grubby-8.40-49.0.2.el8.x86_64 + gsettings-desktop-schemas-3.32.0-6.el8.x86_64 + gtk-update-icon-cache-3.22.30-11.el8.x86_64 + gtk3-3.22.30-11.el8.x86_64 + hardlink-1:1.3-6.el8.x86_64 + harfbuzz-1.7.5-4.el8.x86_64 + hicolor-icon-theme-0.17-2.el8.noarch + jasper-libs-2.0.14-5.el8.x86_64 + java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86_64 + javapackages-filesystem-5.3.0-1.module+el8+5136+7ff78f74.noarch + jbigkit-libs-2.1-14.el8.x86_64 + json-glib-1.4.4-1.el8.x86_64 + kbd-2.0.4-11.el8.x86_64 + kbd-legacy-2.0.4-11.el8.noarch + kbd-misc-2.0.4-11.el8.noarch + lcms2-2.9-2.el8.x86_64 + libX11-1.6.8-8.el8.x86_64 + 
libX11-common-1.6.8-8.el8.noarch + libXau-1.0.9-3.el8.x86_64 + libXcomposite-0.4.4-14.el8.x86_64 + libXcursor-1.1.15-3.el8.x86_64 + libXdamage-1.1.4-14.el8.x86_64 + libXext-1.3.4-1.el8.x86_64 + libXfixes-5.0.3-7.el8.x86_64 + libXft-2.3.3-1.el8.x86_64 + libXi-1.7.10-1.el8.x86_64 + libXinerama-1.1.4-1.el8.x86_64 + libXrandr-1.5.2-1.el8.x86_64 + libXrender-0.9.10-7.el8.x86_64 + libXtst-1.2.3-7.el8.x86_64 + libcroco-0.6.12-4.el8_2.1.x86_64 + libdatrie-0.2.9-7.el8.x86_64 + libepoxy-1.5.8-1.el8.x86_64 + libfontenc-1.1.3-8.el8.x86_64 + libgomp-8.5.0-22.0.1.el8_10.x86_64 + libgusb-0.3.0-1.el8.x86_64 + libjpeg-turbo-1.5.3-12.el8.x86_64 + libkcapi-1.4.0-2.0.1.el8.x86_64 + libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 + libmodman-2.0.1-17.el8.x86_64 + libpkgconf-1.4.2-1.el8.x86_64 + libproxy-0.4.15-5.2.el8.x86_64 + libsoup-2.62.3-5.el8.x86_64 + libthai-0.1.27-2.el8.x86_64 + libtiff-4.0.9-32.el8_10.x86_64 + libwayland-client-1.21.0-1.el8.x86_64 + libwayland-cursor-1.21.0-1.el8.x86_64 + libwayland-egl-1.21.0-1.el8.x86_64 + libxcb-1.13.1-1.el8.x86_64 + libxkbcommon-0.9.1-1.el8.x86_64 + lksctp-tools-1.0.18-3.el8.x86_64 + lua-5.3.4-12.el8.x86_64 + memstrack-0.2.5-2.el8.x86_64 + nspr-4.35.0-1.el8_8.x86_64 + nss-3.90.0-7.el8_10.x86_64 + nss-softokn-3.90.0-7.el8_10.x86_64 + nss-softokn-freebl-3.90.0-7.el8_10.x86_64 + nss-sysinit-3.90.0-7.el8_10.x86_64 + nss-util-3.90.0-7.el8_10.x86_64 + os-prober-1.74-9.0.1.el8.x86_64 + pango-1.42.4-8.el8.x86_64 + pigz-2.4-4.el8.x86_64 + pixman-0.38.4-4.el8.x86_64 + pkgconf-1.4.2-1.el8.x86_64 + pkgconf-m4-1.4.2-1.el8.noarch + pkgconf-pkg-config-1.4.2-1.el8.x86_64 + rest-0.8.1-2.el8.x86_64 + shared-mime-info-1.9-4.el8.x86_64 + systemd-udev-239-78.0.4.el8.x86_64 + ttmkfdir-3.0.9-54.el8.x86_64 + tzdata-java-2024a-1.0.1.el8.noarch + xkeyboard-config-2.28-1.el8.noarch + xorg-x11-font-utils-1:7.5-41.el8.x86_64 + xorg-x11-fonts-Type1-7.5-19.el8.noarch + xz-5.2.4-4.el8_6.x86_64 + +Complete! 
+Last metadata expiration check: 0:00:23 ago on Tue 20 Aug 2024 08:55:14 AM UTC. +Package iproute-6.2.0-5.el8_9.x86_64 is already installed. +Dependencies resolved. +================================================================================ + Package Architecture Version Repository Size +================================================================================ +Upgrading: + iproute x86_64 6.2.0-6.el8_10 ol8_baseos_latest 853 k + +Transaction Summary +================================================================================ +Upgrade 1 Package + +Total download size: 853 k +Downloading Packages: +iproute-6.2.0-6.el8_10.x86_64.rpm 4.2 MB/s | 853 kB 00:00 +-------------------------------------------------------------------------------- +Total 4.2 MB/s | 853 kB 00:00 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. +Running transaction + Preparing : 1/1 + Upgrading : iproute-6.2.0-6.el8_10.x86_64 1/2 + Cleanup : iproute-6.2.0-5.el8_9.x86_64 2/2 + Running scriptlet: iproute-6.2.0-5.el8_9.x86_64 2/2 + Verifying : iproute-6.2.0-6.el8_10.x86_64 1/2 + Verifying : iproute-6.2.0-5.el8_9.x86_64 2/2 + +Upgraded: + iproute-6.2.0-6.el8_10.x86_64 + +Complete! +24 files removed +Removing intermediate container fe168b01f3ad + ---> 791878694a50 +Step 5/12 : RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 59d7143da358 + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 108M 100 108M 0 0 1440k 0 0:01:16 0:01:16 --:--:-- 1578k +Removing intermediate container 59d7143da358 + ---> 17c4534293e5 +Step 6/12 : RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 84b1cbffdc51 +Verifying... ######################################## +Preparing... ######################################## +Updating / installing... 
+ords-23.4.0-8.el8 ######################################## +INFO: Before starting ORDS service, run the below command as user oracle: + ords --config /etc/ords/config install +Removing intermediate container 84b1cbffdc51 + ---> 6e7151b79588 +Step 7/12 : RUN mkdir -p $ORDS_HOME/doc_root && mkdir -p $ORDS_HOME/error && mkdir -p $ORDS_HOME/secrets && chmod ug+x $ORDS_HOME/*.sh && groupadd -g 54322 dba && usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && chown -R oracle:dba $ORDS_HOME && echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + ---> Running in 66e5db5f343f +Removing intermediate container 66e5db5f343f + ---> 0523dc897bf4 +Step 8/12 : USER oracle + ---> Running in ffda8495ac77 +Removing intermediate container ffda8495ac77 + ---> 162acd4d0b93 +Step 9/12 : WORKDIR /home/oracle + ---> Running in 8c14310ffbc7 +Removing intermediate container 8c14310ffbc7 + ---> c8dae809e772 +Step 10/12 : VOLUME ["$ORDS_HOME/config/ords"] + ---> Running in ed64548fd997 +Removing intermediate container ed64548fd997 + ---> 22e2c99247b0 +Step 11/12 : EXPOSE 8888 + ---> Running in 921f7c85d61d +Removing intermediate container 921f7c85d61d + ---> e5d503c92224 +Step 12/12 : CMD $ORDS_HOME/$RUN_FILE + ---> Running in cad487298d63 +Removing intermediate container cad487298d63 + ---> fdb17aa242f8 +Successfully built fdb17aa242f8 +Successfully tagged oracle/ords-dboper:latest +08:57:18 oracle@mitk01:# + diff --git a/docs/multitenant/usecase01/logfiles/openssl_execution.log b/docs/multitenant/usecase01/logfiles/openssl_execution.log new file mode 100644 index 00000000..e3915a21 --- /dev/null +++ b/docs/multitenant/usecase01/logfiles/openssl_execution.log @@ -0,0 +1,19 @@ +CREATING TLS CERTIFICATES +/usr/bin/openssl genrsa -out ca.key 2048 +Generating RSA private key, 2048 bit long modulus (2 primes) +......................+++++ +..................................................+++++ +e is 65537 (0x010001) +/usr/bin/openssl req -new -x509 -days 365 -key ca.key -subj 
"/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost Root CA " -out ca.crt +/usr/bin/openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost" -out server.csr +Generating a RSA private key +...........+++++ +...........................................+++++ +writing new private key to 'tls.key' +----- +/usr/bin/echo "subjectAltName=DNS:cdb-dev-ords.oracle-database-operator-system,DNS:www.example.com" > extfile.txt +/usr/bin/openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt +Signature ok +subject=C = US, ST = California, L = SanFrancisco, O = "oracle ", CN = "cdb-dev-ords.oracle-database-operator-system ", CN = localhost +Getting CA Private Key + diff --git a/docs/multitenant/usecase01/logfiles/ordsconfig.log b/docs/multitenant/usecase01/logfiles/ordsconfig.log new file mode 100644 index 00000000..b787b752 --- /dev/null +++ b/docs/multitenant/usecase01/logfiles/ordsconfig.log @@ -0,0 +1,39 @@ +ORDS: Release 23.4 Production on Tue Aug 20 07:48:44 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +Database pool: default + +Setting Value Source +----------------------------------------- -------------------------------------------------- ----------- +database.api.enabled true Global +database.api.management.services.disabled false Global +db.cdb.adminUser C##DBAPI_CDB_ADMIN AS SYSDBA Pool +db.cdb.adminUser.password ****** Pool Wallet +db.connectionType customurl Pool +db.customURL jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90 Pool + )(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNEC + T_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL= + TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONL + Y))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST= + scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNEC + T_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +db.password ****** Pool Wallet +db.serviceNameSuffix Pool +db.username ORDS_PUBLIC_USER Pool +error.externalPath /opt/oracle/ords/error Global +jdbc.InitialLimit 50 Pool +jdbc.MaxLimit 100 Pool +misc.pagination.maxRows 1000 Pool +plsql.gateway.mode disabled Pool +restEnabledSql.active true Pool +security.requestValidationFunction ords_util.authorize_plsql_gateway Pool +security.verifySSL true Global +standalone.access.log /home/oracle Global +standalone.https.cert /opt/oracle/ords//secrets/tls.crt Global +standalone.https.cert.key /opt/oracle/ords//secrets/tls.key Global +standalone.https.port 8888 Global + diff --git a/docs/multitenant/usecase01/makefile b/docs/multitenant/usecase01/makefile new file mode 100644 index 00000000..d4176c75 --- /dev/null +++ b/docs/multitenant/usecase01/makefile @@ -0,0 +1,284 @@ +# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# +# ___ +# / _ \ _ __ _ __ _ __ ___ _ __ ___ +# | | | | '_ \| '_ \| '__/ _ \ '_ ` _ \ +# | |_| | | | | |_) | | | __/ | | | | | +# \___/|_| |_| .__/|_| \___|_| |_| |_| +# |_| +# ____ _ _ _ +# / ___|___ _ 
__ | |_ _ __ ___ | | | ___ _ __ +# | | / _ \| '_ \| __| '__/ _ \| | |/ _ \ '__| +# | |__| (_) | | | | |_| | | (_) | | | __/ | +# \____\___/|_| |_|\__|_| \___/|_|_|\___|_| +# +# +# This makefile helps to speed up the kubectl commands executions to deploy and test +# the OnPremises operator. Although it has few functionality you can adapt to your needs +# by adding much more targets. +# +# Quick start: +# ~~~~~~~~~~~ +# +# - Copy files of tab.1 in the makefile directory. +# - Edit the secret files and other yaml files with the correct credential as +# specified in the documentation. +# - Edit makefile updating variables of tab.2 +# - Execute commands of tab.3 "make step1" "make step2" "make step3".... +# +# Tab.1 - List of required files +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# +-----------------------------+---------------------------------------------+ +# |oracle-database-operator.yaml| Opertaor yaml file | +# +-----------------------------+---------------------------------------------+ +# |cdb_secret.yaml | Secret file for the rest server pod | +# +-----------------------------+---------------------------------------------+ +# |pdb_secret.yaml | Secret file for the pdb creation | +# +-----------------------------+---------------------------------------------+ +# |tde_secret.yaml | Secret file for the tablepsace enc. 
| +# +-----------------------------+---------------------------------------------+ +# |cdb_create.yaml | Rest server pod creation | +# +-----------------------------+---------------------------------------------+ +# |pdb_create.yaml | Pluggable database creation | +# +-----------------------------+---------------------------------------------+ +# |pdb_close.yaml | Close pluggable database | +# +-----------------------------+---------------------------------------------+ +# |pdb_open.yaml | Open pluggable database | +# +-----------------------------+---------------------------------------------+ +# |pdb_map.yaml | Map an existing pdb | +# +-----------------------------+---------------------------------------------+ +# |oracle-database-operator.yaml| Database operator | +# +-----------------------------+---------------------------------------------+ +# |Dockerfiles | Dockerfile for CBD | +# +-----------------------------+---------------------------------------------+ +# |runOrdsSSL.sh | Init script executed by Dockerfile | +# +-----------------------------+---------------------------------------------+ +# +# Tab.2 - List of variables +# ~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# +-----------------------------+---------------------------------------------+ +# |OCIR | Your image registry | +# +-----------------------------+---------------------------------------------+ +# |OCIRPATH | Path of the image in your registry | +# +-----------------------------+---------------------------------------------+ +# +# Tab.3 - Execution steps +# ~~~~~~~~~~~~~~~~~~~~~~~ +# +# +-----------------------------+---------------------------------------------+ +# | MAKEFILE TARGETS LIST | +# | ----- ooo ----- | +# | - TARGET - - DESCRIPTION - | +# +-----------------------------+-------------------------------------+-------+ +# |step1 | Build rest server images | | +# +-----------------------------+-------------------------------------+ REST | +# |step2 | Tag the immages | SRV | +# 
+-----------------------------+-------------------------------------+ IMG | +# |step3 | Push the image into the repository | | +# +-----------------------------+-------------------------------------+-------+ +# |step4 | Load webhook certmanager | DB | +# +-----------------------------+-------------------------------------+ OPER | +# |step5 | Create the db operator | | +# +-----------------------------+-------------------------------------+-------+ +# |step6 | Create tls certificates | T | +# +-----------------------------+-------------------------------------+ L | +# |step7 | Create tls secret | S | +# +-----------------------------+---------------------------------------------+ +# |step8 | Create database secrets | +# +-----------------------------+---------------------------------------------+ +# |step9 | Create restserver pod | +# | | +---------------------------------------------+ +# | +---> checkstep9 | Monitor the executions | +# +-----------------------------+---------------------------------------------+ +# |step10 | Create pluggable database | +# | | +---------------------------------------------+ +# | +---> checkpdb | Monitor PDB status | +# +-----------------------------+---------------------------------------------+ +# |step11 | Close pluggable database | +# +-----------------------------+---------------------------------------------+ +# |step12 | Open pluggable database | +# +-----------------------------+---------------------------------------------+ +# |step13 | Map pluggable database | +# +-----------------------------+---------------------------------------------+ +# | Before testing step13 delete the crd: | +# | kubectl delete pdb pdb1 -n oracle-database-operator-system | +# +---------------------------------------------------------------------------+ +# |step14 | delete pdb | +# +-----------------------------+---------------------------------------------+ +# | DIAGNOSTIC TARGETS | +# 
+-----------------------------+---------------------------------------------+ +# | dump | Dump pods info into a file | +# +-----------------------------+---------------------------------------------+ +# | reloadop | Reload the db operator | +# +-----------------------------+---------------------------------------------+ +# | login | Login into cdb pod | +# +-----------------------------+---------------------------------------------+ + + +################ TAB 2 VARIABLES ############ +OCIR=[...........YOUR REGISTRY...........] +OCIRPATH=[...PATH IN YOUR REGISTRY.....]/$(REST_SERVER)-dboper:$(ORDSVERSION) +############################################# +REST_SERVER=ords +ORDSVERSION=latest +DOCKER=/usr/bin/docker +KUBECTL=/usr/bin/kubectl +ORDS=/usr/local/bin/ords +CONFIG=/etc/ords/config +IMAGE=oracle/$(REST_SERVER)-dboper:$(ORDSVERSION) +DBOPERATOR=oracle-database-operator.yaml +URLPATH=/_/db-api/stable/database/pdbs/ +OPENSSL=/usr/bin/openssl +ORDSPORT=8888 +MAKE=/usr/bin/make +DOCKERFILE=../../../ords/Dockerfile +RUNSCRIPT=../../../ords/runOrdsSSL.sh +ORDSIMGDIR=../../../ords +RM=/usr/bin/rm +CP=/usr/bin/cp +ECHO=/usr/bin/echo +NAMESPACE=oracle-database-operator-system +CERTMANAGER=https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml +CDB_SECRET=cdb_secret.yaml +PDB_SECRET=pdb_secret.yaml +TDE_SECRET=tde_secret.yaml +CDB=cdb_create.yaml +PDB=pdb_create.yaml +PDB_CLOSE=pdb_close.yaml +PDB_OPEN=pdb_open.yaml +PDB_MAP=pdb_map.yaml +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +COMPANY=oracle +LOCALHOST=localhost +RESTPREFIX=cdb-dev + +step1: createimage +step2: tagimage +step3: push +step4: certmanager +step5: dboperator +step6: tlscert +step7: tlssecret +step8: dbsecret +step9: cdb +step10: pdb +step11: close +step12: open +step13: map +step14: delete + +checkstep9: checkcdb + + +createimage: + $(DOCKER) build -t $(IMAGE) $(ORDSIMGDIR) + +tagimage: + @echo "TAG IMAGE" + $(DOCKER) tag $(IMAGE) $(OCIR)$(OCIRPATH) + +push: + @echo "PUSH IMAGE 
INTO THE REGISTRY" + $(DOCKER) push $(OCIR)$(OCIRPATH) + +certmanager: + @echo "WEBHOOK CERT MANAGER" + $(KUBECTL) apply -f $(CERTMANAGER) + +dboperator: + @echo "ORACLE DATABASE OPERATOR" + $(KUBECTL) apply -f $(DBOPERATOR) + + +#C: Country +#ST: State +#L: locality (city) +#O: Organization Name Organization Unit +#CN: Common Name + +tlscert: + @echo "CREATING TLS CERTIFICATES" + $(OPENSSL) genrsa -out ca.key 2048 + $(OPENSSL) req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(NAMESPACE) /CN=$(LOCALHOST) Root CA " -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(NAMESPACE) /CN=$(LOCALHOST)" -out server.csr + $(ECHO) "subjectAltName=DNS:$(RESTPREFIX)-$(REST_SERVER).$(NAMESPACE),DNS:www.example.com" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out $(SCRT) + +tlssecret: + @echo "CREATING TLS SECRETS" + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(NAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(NAMESPACE) + +dbsecret: + @echo "CREATING DB SECRETS" + $(KUBECTL) apply -f $(CDB_SECRET) -n $(NAMESPACE) + $(KUBECTL) apply -f $(PDB_SECRET) -n $(NAMESPACE) + $(KUBECTL) apply -f $(TDE_SECRET) -n $(NAMESPACE) + +cdb: + @echo "CREATING REST SRV POD" + $(KUBECTL) apply -f $(CDB) + +checkcdb: + $(KUBECTL) logs -f `$(KUBECTL) get pods -n $(NAMESPACE)|grep $(REST_SERVER)|cut -d ' ' -f 1` -n $(NAMESPACE) + +pdb: + $(KUBECTL) apply -f $(PDB) + +close: + $(KUBECTL) apply -f $(PDB_CLOSE) + +open: + $(KUBECTL) apply -f $(PDB_OPEN) + +map: + $(KUBECTL) apply -f $(PDB_MAP) + +checkpdb: + $(KUBECTL) get pdbs -n $(NAMESPACE) + +delete: + $(KUBECTL) apply -f pdb_delete.yaml + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) 
+ @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(NAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(NAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(NAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(NAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(NAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(NAMESPACE) >>$(DIAGFILE) + @echo "CDB LOG DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs `$(KUBECTL) get pods -n $(NAMESPACE)|grep $(REST_SERVER)| cut -d ' ' -f 1` -n $(NAMESPACE) >>$(DIAGFILE) + @echo "SECRET DMP" >>$(DIAGFILE) + @echo "~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) get secrets -o yaml -n $(NAMESPACE) >> $(DIAGFILE) + @echo "CDB/PDB DMP" >> $(DIAGFILE) + $(KUBECTL) get pdbs -o yaml -n $(NAMESPACE) >> $(DIAGFILE) + $(KUBECTL) get cdb -o yaml -n $(NAMESPACE) >> $(DIAGFILE) + @echo "CLUSTER INFO" >> $(DIAGFILE) + $(KUBECTL) get nodes -o wide + $(KUBECTL) get svc --namespace=kube-system + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(NAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(NAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(NAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(NAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(NAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(NAMESPACE) -o yaml | kubectl replace --force -f - + +login: + $(KUBECTL) exec -it `$(KUBECTL) get pods -n $(NAMESPACE)|grep $(REST_SERVER)|cut -d ' ' -f 1` -n $(NAMESPACE) bash + diff --git 
a/docs/multitenant/usecase03/Dockerfile b/docs/multitenant/usecase03/Dockerfile new file mode 100644 index 00000000..772a7e6d --- /dev/null +++ b/docs/multitenant/usecase03/Dockerfile @@ -0,0 +1,80 @@ +## Copyright (c) 2022 Oracle and/or its affiliates. +## +## The Universal Permissive License (UPL), Version 1.0 +## +## Subject to the condition set forth below, permission is hereby granted to any +## person obtaining a copy of this software, associated documentation and/or data +## (collectively the "Software"), free of charge and under any and all copyright +## rights in the Software, and any and all patent rights owned or freely +## licensable by each licensor hereunder covering either (i) the unmodified +## Software as contributed to or provided by such licensor, or (ii) the Larger +## Works (as defined below), to deal in both +## +## (a) the Software, and +## (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +## one is included with the Software (each a "Larger Work" to which the Software +## is contributed by such licensors), +## +## without restriction, including without limitation the rights to copy, create +## derivative works of, display, perform, and distribute the Software and make, +## use, sell, offer for sale, import, export, have made, and have sold the +## Software and the Larger Work(s), and to sublicense the foregoing rights on +## either these or other terms. +## +## This license is subject to the following condition: +## The above copyright notice and either this complete permission notice or at +## a minimum a reference to the UPL must be included in all copies or +## substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +## SOFTWARE. + +FROM container-registry.oracle.com/java/jdk:latest + +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +ENV ORDS_HOME=/opt/oracle/ords/ \ + RUN_FILE="runOrdsSSL.sh" \ + ORDSVERSION=23.4.0-8 + +# Copy binaries +# ------------- +COPY $RUN_FILE $ORDS_HOME + +RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && \ + yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && \ + yum -y install java-11-openjdk-devel && \ + yum -y install iproute && \ + yum clean all + +RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + +RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + +# Setup filesystem and oracle user +# -------------------------------- +RUN mkdir -p $ORDS_HOME/doc_root && \ + mkdir -p $ORDS_HOME/error && \ + mkdir -p $ORDS_HOME/secrets && \ + chmod ug+x $ORDS_HOME/*.sh && \ + groupadd -g 54322 dba && \ + usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && \ + chown -R oracle:dba $ORDS_HOME && \ + echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +# Finalize setup +# ------------------- +USER oracle +WORKDIR /home/oracle + +VOLUME ["$ORDS_HOME/config/ords"] +EXPOSE 8888 + +# Define default command to start Ords Services +CMD $ORDS_HOME/$RUN_FILE + diff --git a/docs/multitenant/usecase03/NamespaceSegregation.png b/docs/multitenant/usecase03/NamespaceSegregation.png new file mode 100644 index 00000000..bcb0ae77 Binary files /dev/null and 
b/docs/multitenant/usecase03/NamespaceSegregation.png differ diff --git a/docs/multitenant/usecase03/README.md b/docs/multitenant/usecase03/README.md new file mode 100644 index 00000000..c06368cd --- /dev/null +++ b/docs/multitenant/usecase03/README.md @@ -0,0 +1,268 @@ + + + +# STEP BY STEP (NAMESPACE SEGREGATION) + +- [STEP BY STEP (NAMESPACE SEGREGATION)](#step-by-step-namespace-segregation) + - [INTRODUCTION](#introduction) + - [GIT CLONE ORACLE DATABASE OPERATOR PROJECT](#git-clone-oracle-database-operator-project) + - [NAMESPACE CREATION](#namespace-creation) + - [WEBHOOK CERTIFICATES](#webhook-certificates) + - [ORACLE DATABASE OPERATOR](#oracle-database-operator) + - [CREATE PDB AND CDB SECRETS](#create-pdb-and-cdb-secrets) + - [CREATE TLS CERTIFICATE](#create-tls-certificate) + - [REST SERVER IMAGE CREATION](#rest-server-image-creation) + - [CDB POD CREATION](#cdb-pod-creation) + - [PDB CREATION](#pdb-creation) + - [MAKEFILE](#makefile) + + +### INTRODUCTION + +> ☞ This folder contains the yaml files required to configure and manage cdb and pdb in different namespaces. The main change here is the possibility to specify the namespace where the CDB will be created; this implies the introduction of a new parameter at the PDB level in order to specify the CDB namespace. + +The tasks performed in usecase03 are the same as those in usecase01, with the exception that the controller pods, cdb pods, and pdb crd are running in different namespaces. You must be aware of the fact that secrets must be created in the proper namespaces; cdb secrets go into the cdb namespace, pdb secrets go into pdbnamespace, while certificate secrets need to be created in every namespace.
+ + +| yaml file parameters | value | description /ords parameter | +|-------------- |--------------------------- |-------------------------------------------------| +| ☞ cdbNamespace | | Cdb namespace | +| dbserver | or | [--db-hostname][1] | +| dbTnsurl | | [--db-custom-url/db.customURL][dbtnsurl] | +| port | | [--db-port][2] | +| cdbName | | Container Name | +| name | | Ords podname prefix in cdb.yaml | +| name | | pdb resource in pdb.yaml | +| ordsImage | /ords-dboper:latest|My public container registry | +| pdbName | | Pluggable database name | +| servicename | | [--db-servicename][3] | +| sysadmin_user | | [--admin-user][adminuser] | +| sysadmin_pwd | | [--password-stdin][pwdstdin] | +| cdbadmin_user | | [db.cdb.adminUser][1] | +| cdbadmin_pwd | | [db.cdb.adminUser.password][cdbadminpwd] | +| webserver_user| | [https user][http] NOT A DB USER | +| webserver_pwd | | [http user password][http] | +| ords_pwd | | [ORDS_PUBLIC_USER password][public_user] | +| pdbTlsKey | | [standalone.https.cert.key][key] | +| pdbTlsCrt | | [standalone.https.cert][cr] | +| pdbTlsCat | | certificate authority | +| xmlFileName | | path for the unplug and plug operation | +| srcPdbName | | name of the database to be cloned | +| fileNameConversions | | used for database cloning | +| tdeKeystorePath | | [tdeKeystorePath][tdeKeystorePath] | +| tdeExport | | [tdeExport] | +| tdeSecret | | [tdeSecret][tdeSecret] | +| tdePassword | | [tdeSecret][tdeSecret] | +| assertivePdbDeletion | boolean | [turn on imperative approach on crd deleteion][imperative] | + +![generla schema](./NamespaceSegregation.png) + +### GIT CLONE ORACLE DATABASE OPERATOR PROJECT + +```bash +git clone https://github.com/oracle/oracle-database-operator.git +cd oracle-database-operator/docs/multitenant/usecase03 +``` +### NAMESPACE CREATION + +We need first to create two different namespaces (**cdbnamespace**,**pdbnamespace**) using ns_pdb_namespace.yaml and ns_cdb_namespace.yaml + +```bash +kubectl apply -f 
ns_pdb_namespace.yaml +kubectl apply -f ns_cdb_namespace.yaml +``` + +### WEBHOOK CERTIFICATES +Create cert manager and verify the status + +```bash +kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml +``` + +```bash +kubectl get pods --namespace cert-manager +NAME READY STATUS RESTARTS AGE +cert-manager-75997f4b44-4nf5c 1/1 Running 1 9d +cert-manager-cainjector-769785cd7b-mzfq5 1/1 Running 1 9d +cert-manager-webhook-6bc9944d78-tslrp 1/1 Running 1 9d +``` + +### ORACLE DATABASE OPERATOR + +Create the oracle database operator using oracle-database-operator.yaml +```bash +cd oracle-database-operator +kubectl apply -f oracle-database-operator.yaml +cd - +``` + +[operator creation log](operator_creation_log.txt) +### CREATE PDB AND CDB SECRETS + +Update the secret files with your base64 encoded password. + +```bash +echo ImAdemoPassword | base64 +SW1BZGVtb1Bhc3N3b3JkCg== +``` +Apply the cdb_secret and pdb_secret yaml file to generate credential information in each namespace. + +``` +kubectl apply -f cdb_secret.yaml +kubectl apply -f pdb_secret.yaml +``` +> ☞ Note that the https credential needs to be replicated in every secret file. It is possible to improve the configuration by creating a dedicated namespace for the https credential in order to specify this information only once. + +Namespace segregation enables the capability of deploying and managing pluggable databases without the cdb administrative passwords. 
+ +### CREATE TLS CERTIFICATE + +Here follows an example of a shell script that can be used to create secret certificates in each namespace involved in the kubernetes multitenant architecture + +```bash +#!/bin/bash +export CDB_NAMESPACE=cdbnamespace +export PDB_NAMESPACE=pdbnamespace +export OPR_NAMESPACE=oracle-database-operator-system +export SKEY=tls.key +export SCRT=tls.crt +export CART=ca.crt +export COMPANY=oracle +export REST_SERVER=ords + +openssl genrsa -out ca.key 2048 +openssl req -new -x509 -days 365 -key ca.key -subj "/C=CN/ST=GD/L=SZ/O=${COMPANY}, Inc./CN=${COMPANY} Root CA" -out ca.crt +openssl req -newkey rsa:2048 -nodes -keyout ${SKEY} -subj "/C=CN/ST=GD/L=SZ/O=${COMPANY}, Inc./CN=cdb-dev-${REST_SERVER}.${CDB_NAMESPACE}" -out server.csr +echo "subjectAltName=DNS:cdb-dev-${REST_SERVER}.${CDB_NAMESPACE},DNS:www.example.com" > extfile.txt +openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out ${SCRT} + +kubectl create secret tls db-tls --key="${SKEY}" --cert="${SCRT}" -n ${CDB_NAMESPACE} +kubectl create secret generic db-ca --from-file="${CART}" -n ${CDB_NAMESPACE} +kubectl create secret tls db-tls --key="${SKEY}" --cert="${SCRT}" -n ${PDB_NAMESPACE} +kubectl create secret generic db-ca --from-file="${CART}" -n ${PDB_NAMESPACE} +kubectl create secret tls db-tls --key="${SKEY}" --cert="${SCRT}" -n ${OPR_NAMESPACE} +kubectl create secret generic db-ca --from-file="${CART}" -n ${OPR_NAMESPACE} +``` +After creating all the secrets you should have the following pattern + +```bash +kubectl get secrets -n oracle-database-operator-system +NAME TYPE DATA AGE +db-ca Opaque 1 6d5h +db-tls kubernetes.io/tls 2 6d5h +webhook-server-cert kubernetes.io/tls 3 6d15h + + +kubectl get secrets -n cdbnamespace +NAME TYPE DATA AGE +cdb1-secret Opaque 6 6d15h +db-ca Opaque 1 6d6h +db-tls kubernetes.io/tls 2 6d6h + + +kubectl get secrets -n pdbnamespace +NAME TYPE DATA AGE +db-ca Opaque 1 6d6h +db-tls kubernetes.io/tls 2 6d6h 
+pdb1-secret Opaque 4 2d16h +tde1-secret Opaque 2 22h +``` +### REST SERVER IMAGE CREATION + +```bash +cd oracle-database-operator/ords +docker build -t oracle/ords-dboper:latest . +docker tag oracle/ords-dboper:latest [path_of_your_registry]/ords-dboper:latest +docker push [path_of_your_registry]/ords-dboper:latest +cd - +``` + +### CDB POD CREATION + +**note:** + Before creating the CDB pod make sure that all the pluggable databases in the container DB are open. + + + +Update the cdb_create.yaml with the path of the image generated before to create CDB pod + +```bash +kubectl apply -f cdb_create.yaml +``` + +Verify the status of the operation and cdb pod existence using the following commands + +```bash +## check the pod creation +kubectl get pods -n cdbnamespace + +## check the rest server log after pod creation +kubectl logs -f `/usr/bin/kubectl get pods -n cdbnamespace|grep ords|cut -d ' ' -f 1` -n cdbnamespace + +##login to the pod for further debug and information gathering +kubectl exec -it `kubectl get pods -n cdbnamespace |grep ords|cut -d ' ' -f 1` -n cdbnamespace bash +``` + +[log cdb creation](./cdb_creation_log.txt) + +### PDB CREATION + +Apply the pdb_create.yaml file to create a new pdb. After pdb creation you should be able to get pdb details using the **kubectl get** command + +```bash +kubectl apply -f pdb_create.yaml +``` + +```bash +#!/bin/bash +#checkpdbs.sh +kubectl get pdbs -n pdbnamespace -o=jsonpath='{range .items[*]} +{"\n==================================================================\n"} +{"CDB="}{.metadata.labels.cdb} +{"K8SNAME="}{.metadata.name} +{"PDBNAME="}{.spec.pdbName} +{"OPENMODE="}{.status.openMode} +{"ACTION="}{.status.action} +{"MSG="}{.status.msg} +{"\n"}{end}' +``` + +```bash +./checkpdbs.sh +================================================================== +CDB=cdb-dev +K8SNAME=pdb1 +PDBNAME=pdbdev +OPENMODE=READ WRITE +ACTION=CREATE +MSG=Success + +``` +[pdb creation log](./pdb_creation_log.txt) + +### MAKEFILE + +In 
order to facilitate the command execution, use the [makefile](./makefile); available target details are exposed in the following tables. + +|target |Action | +|-----------------------------|-------------------------------------| +|step1 | Build rest server images | +|step2 | Tag the images | +|step3 | Push the image into the repository | +|step4 | Load webhook certmanager | +|step5 | Create the db operator | +|step6 | Create tls certificates | +|step7 | Create tls secret | +|step8 | Create database secrets | +|step9 | Create restserver pod | +|checkstep9 | Monitor the executions | +|step10 | Create pluggable database | +|checkpdb | Monitor PDB status | +|dump | Dump pods info into a file | +|reloadop | Reload the db operator | +|login | Login into cdb pod | + +[imperative]:https://kubernetes.io/docs/concepts/overview/working-with-objects/object-management/ + + + diff --git a/docs/multitenant/usecase03/cdb_create.yaml b/docs/multitenant/usecase03/cdb_create.yaml new file mode 100644 index 00000000..d3b5e04f --- /dev/null +++ b/docs/multitenant/usecase03/cdb_create.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: CDB +metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" + ordsImagePullPolicy: "Always" + dbTnsurl : "...Container tns alias....." 
+ replicas: 1 + sysAdminPwd: + secret: + secretName: "cdb1-secret" + key: "sysadmin_pwd" + ordsPwd: + secret: + secretName: "cdb1-secret" + key: "ords_pwd" + cdbAdminUser: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_user" + cdbAdminPwd: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_pwd" + webServerUser: + secret: + secretName: "cdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "cdb1-secret" + key: "webserver_pwd" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + diff --git a/docs/multitenant/usecase03/cdb_creation_log.txt b/docs/multitenant/usecase03/cdb_creation_log.txt new file mode 100644 index 00000000..8c7dc161 --- /dev/null +++ b/docs/multitenant/usecase03/cdb_creation_log.txt @@ -0,0 +1,336 @@ +kubectl get pods -n cdbnamespace +NAME READY STATUS RESTARTS AGE +cdb-dev-ords-rs-pgqqh 0/1 ContainerCreating 0 1s + +kubectl get pods -n cdbnamespace +NAME READY STATUS RESTARTS AGE +cdb-dev-ords-rs-pgqqh 1/1 Running 0 6s + +kubectl logs -f `/usr/bin/kubectl get pods -n cdbnamespace|grep ords|cut -d ' ' -f 1` -n cdbnamespace +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M +NOT_INSTALLED=2 + SETUP +==================================================== +CONFIG=/etc/ords/config +total 0 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:20 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.connectionType was set to: customurl in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:21 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The setting named: db.customURL was set to: jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:23 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: security.requestValidationFunction was set to: false in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:25 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.MaxLimit was set to: 100 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:27 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.InitialLimit was set to: 50 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:29 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: error.externalPath was set to: /opt/oracle/ords/error +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:31 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.access.log was set to: /home/oracle +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:32 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.port was set to: 8888 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:34 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert was set to: /opt/oracle/ords//secrets/tls.crt +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:36 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert.key was set to: /opt/oracle/ords//secrets/tls.key +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:38 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: restEnabledSql.active was set to: true in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:40 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: security.verifySSL was set to: true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:42 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: database.api.enabled was set to: true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:43 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: plsql.gateway.mode was set to: disabled in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:45 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: database.api.management.services.disabled was set to: false +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:47 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: misc.pagination.maxRows was set to: 1000 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:49 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser was set to: C##DBAPI_CDB_ADMIN AS SYSDBA in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:51 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser.password was set to: ****** in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:53 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +Created user welcome in file /etc/ords/config/global/credentials +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:17:55 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +Oracle REST Data Services - Non-Interactive Install + +Retrieving information.. +Completed verifying Oracle REST Data Services schema version 23.3.0.r2891830. 
+Connecting to database user: ORDS_PUBLIC_USER url: jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +The setting named: db.serviceNameSuffix was set to: in configuration: default +The setting named: db.username was set to: ORDS_PUBLIC_USER in configuration: default +The setting named: db.password was set to: ****** in configuration: default +The setting named: security.requestValidationFunction was set to: ords_util.authorize_plsql_gateway in configuration: default +2024-01-25T17:17:58.898Z INFO Oracle REST Data Services schema version 23.3.0.r2891830 is installed. +2024-01-25T17:17:58.900Z INFO To run in standalone mode, use the ords serve command: +2024-01-25T17:17:58.900Z INFO ords --config /etc/ords/config serve +2024-01-25T17:17:58.900Z INFO Visit the ORDS Documentation to access tutorials, developer guides and more to help you get started with the new ORDS Command Line Interface (http://oracle.com/rest). +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.3 Production on Thu Jan 25 17:18:00 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +2024-01-25T17:18:00.960Z INFO HTTP and HTTP/2 cleartext listening on host: 0.0.0.0 port: 8080 +2024-01-25T17:18:00.963Z INFO HTTPS and HTTPS/2 listening on host: 0.0.0.0 port: 8888 +2024-01-25T17:18:00.980Z INFO Disabling document root because the specified folder does not exist: /etc/ords/config/global/doc_root +2024-01-25T17:18:00.981Z INFO Default forwarding from / to contextRoot configured. 
+2024-01-25T17:18:06.634Z INFO Configuration properties for: |default|lo| +db.serviceNameSuffix= +java.specification.version=21 +conf.use.wallet=true +database.api.management.services.disabled=false +sun.jnu.encoding=UTF-8 +user.region=US +java.class.path=/opt/oracle/ords/ords.war +java.vm.vendor=Oracle Corporation +standalone.https.cert.key=/opt/oracle/ords//secrets/tls.key +sun.arch.data.model=64 +nashorn.args=--no-deprecation-warning +java.vendor.url=https://java.oracle.com/ +resource.templates.enabled=false +user.timezone=UTC +java.vm.specification.version=21 +os.name=Linux +sun.java.launcher=SUN_STANDARD +user.country=US +sun.boot.library.path=/usr/java/jdk-21/lib +sun.java.command=/opt/oracle/ords/ords.war --config /etc/ords/config serve --port 8888 --secure +jdk.debug=release +sun.cpu.endian=little +user.home=/home/oracle +oracle.dbtools.launcher.executable.jar.path=/opt/oracle/ords/ords.war +user.language=en +db.cdb.adminUser.password=****** +java.specification.vendor=Oracle Corporation +java.version.date=2023-10-17 +database.api.enabled=true +java.home=/usr/java/jdk-21 +db.username=ORDS_PUBLIC_USER +file.separator=/ +java.vm.compressedOopsMode=32-bit +line.separator= + +restEnabledSql.active=true +java.specification.name=Java Platform API Specification +java.vm.specification.vendor=Oracle Corporation +java.awt.headless=true +standalone.https.cert=/opt/oracle/ords//secrets/tls.crt +db.password=****** +sun.management.compiler=HotSpot 64-Bit Tiered Compilers +security.requestValidationFunction=ords_util.authorize_plsql_gateway +misc.pagination.maxRows=1000 +java.runtime.version=21.0.1+12-LTS-29 +user.name=oracle +error.externalPath=/opt/oracle/ords/error +stdout.encoding=UTF-8 +path.separator=: +db.cdb.adminUser=C##DBAPI_CDB_ADMIN AS SYSDBA +os.version=5.4.17-2136.323.8.1.el7uek.x86_64 +java.runtime.name=Java(TM) SE Runtime Environment +file.encoding=UTF-8 +plsql.gateway.mode=disabled +security.verifySSL=true +standalone.https.port=8888 +java.vm.name=Java 
HotSpot(TM) 64-Bit Server VM +java.vendor.url.bug=https://bugreport.java.com/bugreport/ +java.io.tmpdir=/tmp +oracle.dbtools.cmdline.ShellCommand=ords +java.version=21.0.1 +user.dir=/home/oracle/keystore +os.arch=amd64 +java.vm.specification.name=Java Virtual Machine Specification +jdbc.MaxLimit=100 +oracle.dbtools.cmdline.home=/opt/oracle/ords +native.encoding=UTF-8 +java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib +java.vendor=Oracle Corporation +java.vm.info=mixed mode, sharing +stderr.encoding=UTF-8 +java.vm.version=21.0.1+12-LTS-29 +sun.io.unicode.encoding=UnicodeLittle +jdbc.InitialLimit=50 +db.connectionType=customurl +java.class.version=65.0 +db.customURL=jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +standalone.access.log=/home/oracle + +2024-01-25T17:18:10.381Z INFO + +Mapped local pools from /etc/ords/config/databases: + /ords/ => default => VALID + + +2024-01-25T17:18:10.532Z INFO Oracle REST Data Services initialized +Oracle REST Data Services version : 23.3.0.r2891830 +Oracle REST Data Services server info: jetty/10.0.17 +Oracle REST Data Services java info: Java HotSpot(TM) 64-Bit Server VM 21.0.1+12-LTS-29 + + +exec -it `kubectl get pods -n cdbnamespace |grep ords|cut -d ' ' -f 1` -n cdbnamespace bash +[oracle@cdb-dev-ords-rs-pgqqh ~]$ ps -ef|grep java +oracle 1147 1116 10 17:17 ? 
00:00:21 /usr/java/jdk-21/bin/java -Doracle.dbtools.cmdline.home=/opt/oracle/ords -Duser.language=en -Duser.region=US -Dfile.encoding=UTF-8 -Djava.awt.headless=true -Dnashorn.args=--no-deprecation-warning -Doracle.dbtools.cmdline.ShellCommand=ords -Duser.timezone=UTC -jar /opt/oracle/ords/ords.war --config /etc/ords/config serve --port 8888 --secure +oracle 1227 1200 0 17:21 pts/0 00:00:00 grep --color=auto java diff --git a/docs/multitenant/usecase03/cdb_secret.yaml b/docs/multitenant/usecase03/cdb_secret.yaml new file mode 100644 index 00000000..8f1b6fc9 --- /dev/null +++ b/docs/multitenant/usecase03/cdb_secret.yaml @@ -0,0 +1,17 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: cdb1-secret + namespace: cdbnamespace +type: Opaque +data: + ords_pwd: "[...base64 encoded password...]" + sysadmin_pwd: "[...base64 encoded password...]" + cdbadmin_user: "[...base64 encoded password...]" + cdbadmin_pwd: "[...base64 encoded password...]" + webserver_user: "[...base64 encoded password...]" + webserver_pwd: "[...base64 encoded password...]" diff --git a/docs/multitenant/usecase03/gentlscert.sh b/docs/multitenant/usecase03/gentlscert.sh new file mode 100644 index 00000000..49e29147 --- /dev/null +++ b/docs/multitenant/usecase03/gentlscert.sh @@ -0,0 +1,23 @@ +#!/bin/bash +export CDB_NAMESPACE=cdbnamespace +export PDB_NAMESPACE=pdbnamespace +export OPR_NAMESPACE=oracle-database-operator-system +export SKEY=tls.key +export SCRT=tls.crt +export CART=ca.crt +export COMPANY=oracle +export REST_SERVER=ords + +openssl genrsa -out ca.key 2048 +openssl req -new -x509 -days 365 -key ca.key -subj "/C=CN/ST=GD/L=SZ/O=${COMPANY}, Inc./CN=${COMPANY} Root CA" -out ca.crt +openssl req -newkey rsa:2048 -nodes -keyout ${SKEY} -subj "/C=CN/ST=GD/L=SZ/O=${COMPANY}, 
Inc./CN=cdb-dev-${REST_SERVER}.${CDB_NAMESPACE}" -out server.csr +echo "subjectAltName=DNS:cdb-dev-${REST_SERVER}.${CDB_NAMESPACE},DNS:www.example.com" > extfile.txt +openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out ${SCRT} + +kubectl create secret tls db-tls --key="${SKEY}" --cert="${SCRT}" -n ${CDB_NAMESPACE} +kubectl create secret generic db-ca --from-file="${CART}" -n ${CDB_NAMESPACE} +kubectl create secret tls db-tls --key="${SKEY}" --cert="${SCRT}" -n ${PDB_NAMESPACE} +kubectl create secret generic db-ca --from-file="${CART}" -n ${PDB_NAMESPACE} +kubectl create secret tls db-tls --key="${SKEY}" --cert="${SCRT}" -n ${OPR_NAMESPACE} +kubectl create secret generic db-ca --from-file="${CART}" -n ${OPR_NAMESPACE} + diff --git a/docs/multitenant/usecase03/makefile b/docs/multitenant/usecase03/makefile new file mode 100644 index 00000000..7270a5e0 --- /dev/null +++ b/docs/multitenant/usecase03/makefile @@ -0,0 +1,285 @@ +# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# +# ___ +# / _ \ _ __ _ __ _ __ ___ _ __ ___ +# | | | | '_ \| '_ \| '__/ _ \ '_ ` _ \ +# | |_| | | | | |_) | | | __/ | | | | | +# \___/|_| |_| .__/|_| \___|_| |_| |_| +# |_| +# ____ _ _ _ +# / ___|___ _ __ | |_ _ __ ___ | | | ___ _ __ +# | | / _ \| '_ \| __| '__/ _ \| | |/ _ \ '__| +# | |__| (_) | | | | |_| | | (_) | | | __/ | +# \____\___/|_| |_|\__|_| \___/|_|_|\___|_| +# +# +# This makefile helps to speed up the kubectl commands executions to deploy and test +# the mutlitenant operator. Although it has few functionality you can adapt to your needs +# by adding much more targets. +# +# Quick start: +# ~~~~~~~~~~~ +# +# - Copy files of tab.1 in the makefile directory. +# - Edit the secret files and other yaml files with the correct credential as +# specified in the documentation. 
+# - Edit makefile updating variables of tab.2 +# - Execute commands of tab.3 "make step1" "make step2" "make step3".... +# +# Tab.1 - List of required files +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# +-----------------------------+---------------------------------------------+ +# |oracle-database-operator.yaml| Opertaor yaml file | +# +-----------------------------+---------------------------------------------+ +# |cdb_secret.yaml | Secret file for the rest server pod | +# +-----------------------------+---------------------------------------------+ +# |pdb_secret.yaml | Secret file for the pdb creation | +# +-----------------------------+---------------------------------------------+ +# |cdb_create.yaml | Rest server pod creation | +# +-----------------------------+---------------------------------------------+ +# |pdb_create.yaml | Pluggable database creation | +# +-----------------------------+---------------------------------------------+ +# |oracle-database-operator.yaml| Database operator | +# +-----------------------------+---------------------------------------------+ +# |Dockerfiles | Dockerfile for CBD | +# +-----------------------------+---------------------------------------------+ +# |runOrdsSSL.sh | Init script executed by Dockerfile | +# +-----------------------------+---------------------------------------------+ +# +# Tab.2 - List of variables +# ~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# +-----------------------------+---------------------------------------------+ +# |OCIR | Your image registry | +# +-----------------------------+---------------------------------------------+ +# |OCIRPATH | Path of the image in your registry | +# +-----------------------------+---------------------------------------------+ +# +# Tab.3 - Execution steps +# ~~~~~~~~~~~~~~~~~~~~~~~ +# +# +-----------------------------+---------------------------------------------+ +# | MAKEFILE TARGETS LIST | +# | ----- ooo ----- | +# | - TARGET - - DESCRIPTION - | +# 
+-----------------------------+-------------------------------------+-------+ +# |step1 | Build rest server images | | +# +-----------------------------+-------------------------------------+ REST | +# |step2 | Tag the immages | SRV | +# +-----------------------------+-------------------------------------+ IMG | +# |step3 | Push the image into the repository | | +# +-----------------------------+-------------------------------------+-------+ +# |step4 | Load webhook certmanager | DB | +# +-----------------------------+-------------------------------------+ OPER | +# |step5 | Create the db operator | | +# +-----------------------------+-------------------------------------+-------+ +# |step6 | Create tls certificates | T | +# +-----------------------------+-------------------------------------+ L | +# |step7 | Create tls secret | S | +# +-----------------------------+---------------------------------------------+ +# |step8 | Create database secrets | +# +-----------------------------+---------------------------------------------+ +# |step9 | Create restserver pod | +# | | +---------------------------------------------+ +# | +---> checkstep9 | Monitor the executions | +# +-----------------------------+---------------------------------------------+ +# |step10 | Create pluggable database | +# | | +---------------------------------------------+ +# | +---> checkpdb | Monitor PDB status | +# +-----------------------------+---------------------------------------------+ +# | DIAGNOSTIC TARGETS | +# +-----------------------------+---------------------------------------------+ +# | dump | Dump pods info into a file | +# +-----------------------------+---------------------------------------------+ +# | reloadop | Reload the db operator | +# +-----------------------------+---------------------------------------------+ +# | login | Login into cdb pod | +# +-----------------------------+---------------------------------------------+ + + +################ TAB 2 VARIABLES 
############ +REST_SERVER=ords +ORDSVERSION=latest + +OCIR=[container registry] +OCIRPATH=$(REST_SERVER)-dboper:$(ORDSVERSION) + +#examples: +#OCIR=lin.ocir.io +#OCIRPATH=/sampletenancy/samplepath/sampledir/$(REST_SERVER)-dboper:$(ORDSVERSION) +############################################# +DOCKER=/usr/bin/docker +KUBECTL=/usr/bin/kubectl +ORDS=/usr/local/bin/ords +CONFIG=/etc/ords/config +IMAGE=oracle/$(REST_SERVER)-dboper:$(ORDSVERSION) +DBOPERATOR=oracle-database-operator.yaml +URLPATH=/_/db-api/stable/database/pdbs/ +OPENSSL=/usr/bin/openssl +ORDSPORT=8888 +MAKE=/usr/bin/make +DOCKERFILE=../../../ords/Dockerfile +RUNSCRIPT=../../../ords/runOrdsSSL.sh +RM=/usr/bin/rm +CP=/bin/cp +ECHO=/usr/bin/echo +CERTMANAGER=https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml +CDB_SECRET_YAML=cdb_secret.yaml +PDB_SECRET_YAML=pdb_secret.yaml +TDE_SECRET_YAML=tde_secret.yaml +CDB_NAMESPACE_YAML=ns_namespace_cdb.yaml +PDB_NAMESPACE_YAML=ns_namespace_pdb.yaml +OPR_NAMESPACE=oracle-database-operator-system +PDB_NAMESPACE=$(shell grep namespace $(PDB_NAMESPACE_YAML) |cut -d: -f 2| tr -d ' ') +CDB_NAMESPACE=$(shell grep namespace $(CDB_NAMESPACE_YAML) |cut -d: -f 2| tr -d ' ') +CDB=cdb_create.yaml +PDB=pdb_create.yaml +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +COMPANY=oracle +LOCALHOST=localhost +RESTPREFIX=cdb-dev + + +step1: createimage +step2: tagimage +step3: push +step4: certmanager +step5: dboperator +step6: tlscert +step7: tlssecret +step8: dbsecret +step9: cdb +step10: pdb + +checkstep9: checkcdb + + +createimage: + @echo "BUILDING CDB IMAGES" + $(CP) $(DOCKERFILE) . + $(CP) $(RUNSCRIPT) . + $(DOCKER) build -t $(IMAGE) . 
+ +tagimage: + @echo "TAG IMAGE" + $(DOCKER) tag $(IMAGE) $(OCIR)$(OCIRPATH) + +push: + @echo "PUSH IMAGE INTO THE REGISTRY" + $(DOCKER) push $(OCIR)$(OCIRPATH) + +certmanager: + @echo "WEBHOOK CERT MANAGER" + $(KUBECTL) apply -f $(CERTMANAGER) + +dboperator: + @echo "ORACLE DATABASE OPERATOR" + $(KUBECTL) apply -f $(DBOPERATOR) + +namespace: + $(KUBECTL) get namespaces + $(KUBECTL) apply -f $(CDB_NAMESPACE_YAML) + $(KUBECTL) apply -f $(PDB_NAMESPACE_YAML) + $(KUBECTL) get namespaces + + +tlscert: + @echo "CREATING TLS CERTIFICATES" + $(OPENSSL) genrsa -out ca.key 2048 + $(OPENSSL) req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(CDB_NAMESPACE) /CN=$(LOCALHOST) Root CA " -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(CDB_NAMESPACE) /CN=$(LOCALHOST)" -out server.csr + $(ECHO) "subjectAltName=DNS:$(RESTPREFIX)-$(REST_SERVER).$(CDB_NAMESPACE),DNS:www.example.com" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out $(SCRT) + + +tlssecret: + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(CDB_NAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(CDB_NAMESPACE) + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(PDB_NAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(PDB_NAMESPACE) + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(OPR_NAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(OPR_NAMESPACE) + + +dbsecret: + @echo "CREATING DB SECRETS" + $(KUBECTL) apply -f $(CDB_SECRET_YAML) + $(KUBECTL) apply -f $(PDB_SECRET_YAML) + $(KUBECTL) apply -f $(TDE_SECRET_YAML) + + +cdb: + @echo "CREATING REST SRV POD" + $(KUBECTL) apply -f $(CDB) + +checkcdb: + 
$(KUBECTL) logs -f `$(KUBECTL) get pods -n $(CDB_NAMESPACE)|grep $(REST_SERVER)|cut -d ' ' -f 1` -n $(CDB_NAMESPACE) + +pdb: + $(KUBECTL) apply -f $(PDB) + +checkpdb: + $(KUBECTL) get pdbs -n $(OPR_NAMESPACE) + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPR_NAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPR_NAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPR_NAMESPACE) >>$(DIAGFILE) + @echo "CDB LOG DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs `$(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep $(REST_SERVER)| cut -d ' ' -f 1` -n $(OPR_NAMESPACE) >>$(DIAGFILE) + @echo "SECRET DMP" >>$(DIAGFILE) + @echo "~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) get secrets -o yaml -n $(OPR_NAMESPACE) >> $(DIAGFILE) + @echo "CDB/PDB DMP" >> $(DIAGFILE) + $(KUBECTL) get pdbs -o yaml -n $(OPR_NAMESPACE) >> $(DIAGFILE) + $(KUBECTL) get cdb -o yaml -n $(OPR_NAMESPACE) >> $(DIAGFILE) + @echo "CLUSTER INFO" >> $(DIAGFILE) + $(KUBECTL) get nodes -o wide + $(KUBECTL) get svc --namespace=kube-system + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n 
$(OPR_NAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPR_NAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPR_NAMESPACE) -o yaml | kubectl replace --force -f - + +login: + $(KUBECTL) exec -it `$(KUBECTL) get pods -n $(CDB_NAMESPACE) |grep $(REST_SERVER)|cut -d ' ' -f 1` -n $(CDB_NAMESPACE) bash + +cdblog: + $(KUBECTL) logs -f `$(KUBECTL) get pods -n $(CDB_NAMESPACE)|grep $(REST_SERVER)|cut -d ' ' -f 1` -n $(CDB_NAMESPACE) + + + +xlog1: + $(KUBECTL) logs -f pod/`$(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPR_NAMESPACE) + +xlog2: + $(KUBECTL) logs -f pod/`$(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1` -n $(OPR_NAMESPACE) + +xlog3: + $(KUBECTL) logs -f pod/`$(KUBECTL) get pods -n $(OPR_NAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPR_NAMESPACE) + +checkdep: + $(KUBECTL) api-resources --verbs=list --namespaced -o name | xargs -n 1 $(KUBECTL) get -n $(OPR_NAMESPACE) + $(KUBECTL) api-resources --verbs=list --namespaced -o name | xargs -n 1 $(KUBECTL) get -n $(CBD_NAMESPACE) + $(KUBECTL) api-resources --verbs=list --namespaced -o name | xargs -n 1 $(KUBECTL) get -n $(PDB_NAMESPACE) + + + diff --git a/docs/multitenant/usecase03/ns_namespace_cdb.yaml b/docs/multitenant/usecase03/ns_namespace_cdb.yaml new file mode 100644 index 00000000..f4c6d77b --- /dev/null +++ b/docs/multitenant/usecase03/ns_namespace_cdb.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: cdbnamespace + diff --git a/docs/multitenant/usecase03/ns_namespace_pdb.yaml b/docs/multitenant/usecase03/ns_namespace_pdb.yaml new file mode 100644 index 00000000..b22245f9 --- /dev/null +++ b/docs/multitenant/usecase03/ns_namespace_pdb.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + 
control-plane: controller-manager + name: pdbnamespace + diff --git a/docs/multitenant/usecase03/operator_creation_log.txt b/docs/multitenant/usecase03/operator_creation_log.txt new file mode 100644 index 00000000..36ed02ac --- /dev/null +++ b/docs/multitenant/usecase03/operator_creation_log.txt @@ -0,0 +1,27 @@ +kubectl apply -f oracle-database-operator.yaml +namespace/oracle-database-operator-system created +customresourcedefinition.apiextensions.k8s.io/autonomouscontainerdatabases.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/autonomousdatabasebackups.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/autonomousdatabaserestores.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/autonomousdatabases.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/cdbs.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/dataguardbrokers.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/dbcssystems.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/oraclerestdataservices.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/pdbs.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/shardingdatabases.database.oracle.com configured +customresourcedefinition.apiextensions.k8s.io/singleinstancedatabases.database.oracle.com configured +role.rbac.authorization.k8s.io/oracle-database-operator-leader-election-role created +clusterrole.rbac.authorization.k8s.io/oracle-database-operator-manager-role created +clusterrole.rbac.authorization.k8s.io/oracle-database-operator-metrics-reader created +clusterrole.rbac.authorization.k8s.io/oracle-database-operator-oracle-database-operator-proxy-role created +rolebinding.rbac.authorization.k8s.io/oracle-database-operator-oracle-database-operator-leader-election-rolebinding created 
+clusterrolebinding.rbac.authorization.k8s.io/oracle-database-operator-oracle-database-operator-manager-rolebinding created +clusterrolebinding.rbac.authorization.k8s.io/oracle-database-operator-oracle-database-operator-proxy-rolebinding created +service/oracle-database-operator-controller-manager-metrics-service created +service/oracle-database-operator-webhook-service created +certificate.cert-manager.io/oracle-database-operator-serving-cert created +issuer.cert-manager.io/oracle-database-operator-selfsigned-issuer created +mutatingwebhookconfiguration.admissionregistration.k8s.io/oracle-database-operator-mutating-webhook-configuration created +validatingwebhookconfiguration.admissionregistration.k8s.io/oracle-database-operator-validating-webhook-configuration created +deployment.apps/oracle-database-operator-controller-manager created diff --git a/docs/multitenant/usecase03/pdb_create.yaml b/docs/multitenant/usecase03/pdb_create.yaml new file mode 100644 index 00000000..200f3712 --- /dev/null +++ b/docs/multitenant/usecase03/pdb_create.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + fileNameConversions: "NONE" + tdeImport: false + totalSize: "1G" + tempSize: "100M" + action: "Create" + diff --git a/docs/multitenant/usecase03/pdb_creation_log.txt 
b/docs/multitenant/usecase03/pdb_creation_log.txt new file mode 100644 index 00000000..71d0eb4f --- /dev/null +++ b/docs/multitenant/usecase03/pdb_creation_log.txt @@ -0,0 +1,6 @@ +kubectl apply -f pdb_create.yaml +pdb.database.oracle.com/pdb1 created + +kubectl get pdbs -n pdbnamespace +NAME CONNECT_STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE +pdb1 (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdbdev))) DB12 pdbdev READ WRITE 0.78G Ready Success diff --git a/docs/multitenant/usecase03/pdb_secret.yaml b/docs/multitenant/usecase03/pdb_secret.yaml new file mode 100644 index 00000000..f1dfdac6 --- /dev/null +++ b/docs/multitenant/usecase03/pdb_secret.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: pdb1-secret + namespace: pdbnamespace +type: Opaque +data: + sysadmin_user: "[...base64 encoded password...]" + sysadmin_pwd: "[...base64 encoded password...]" + webserver_user: "[...base64 encoded password...]" + webserver_pwd: "[...base64 encoded password...]" + diff --git a/docs/multitenant/usecase03/runOrdsSSL.sh b/docs/multitenant/usecase03/runOrdsSSL.sh new file mode 100644 index 00000000..35f1b77b --- /dev/null +++ b/docs/multitenant/usecase03/runOrdsSSL.sh @@ -0,0 +1,190 @@ +#!/bin/bash + +cat <$TNSNAME + + +function SetParameter() { + ##ords config info <--- Use this command to get the list + +[[ ! 
-z "${ORACLE_HOST}" && -z "${DBTNSURL}" ]] && { + $ORDS --config ${CONFIG} config set db.hostname ${ORACLE_HOST:-racnode1} + $ORDS --config ${CONFIG} config set db.port ${ORACLE_PORT:-1521} + $ORDS --config ${CONFIG} config set db.servicename ${ORACLE_SERVICE:-TESTORDS} +} + +[[ -z "${ORACLE_HOST}" && ! -z "${DBTNSURL}" ]] && { + #$ORDS --config ${CONFIG} config set db.tnsAliasName ${TNSALIAS} + #$ORDS --config ${CONFIG} config set db.tnsDirectory ${TNS_ADMIN} + #$ORDS --config ${CONFIG} config set db.connectionType tns + + $ORDS --config ${CONFIG} config set db.connectionType customurl + $ORDS --config ${CONFIG} config set db.customURL jdbc:oracle:thin:@${DBTNSURL} +} + + $ORDS --config ${CONFIG} config set security.requestValidationFunction false + $ORDS --config ${CONFIG} config set jdbc.MaxLimit 100 + $ORDS --config ${CONFIG} config set jdbc.InitialLimit 50 + $ORDS --config ${CONFIG} config set error.externalPath ${ERRORFOLDER} + $ORDS --config ${CONFIG} config set standalone.access.log /home/oracle + $ORDS --config ${CONFIG} config set standalone.https.port 8888 + $ORDS --config ${CONFIG} config set standalone.https.cert ${CERTIFICATE} + $ORDS --config ${CONFIG} config set standalone.https.cert.key ${KEY} + $ORDS --config ${CONFIG} config set restEnabledSql.active true + $ORDS --config ${CONFIG} config set security.verifySSL true + $ORDS --config ${CONFIG} config set database.api.enabled true + $ORDS --config ${CONFIG} config set plsql.gateway.mode disabled + $ORDS --config ${CONFIG} config set database.api.management.services.disabled false + $ORDS --config ${CONFIG} config set misc.pagination.maxRows 1000 + $ORDS --config ${CONFIG} config set db.cdb.adminUser "${CDBADMIN_USER:-C##DBAPI_CDB_ADMIN} AS SYSDBA" + $ORDS --config ${CONFIG} config secret --password-stdin db.cdb.adminUser.password << EOF +${CDBADMIN_PWD:-PROVIDE_A_PASSWORD} +EOF + +$ORDS --config ${CONFIG} config user add --password-stdin ${WEBSERVER_USER:-ordspdbadmin} "SQL Administrator, System 
Administrator" <${CKF} 2>&1 +echo "checkfile" >> ${CKF} +NOT_INSTALLED=`cat ${CKF} | grep "INFO: The" |wc -l ` +echo NOT_INSTALLED=$NOT_INSTALLED + + +function StartUp () { + $ORDS --config $CONFIG serve --port 8888 --secure +} + +# Check whether ords is already setup +if [ $NOT_INSTALLED -ne 0 ] +then + echo " SETUP " + setupOrds; + StartUp; +fi + +if [ $NOT_INSTALLED -eq 0 ] +then + echo " STARTUP " + StartUp; +fi + + diff --git a/docs/observability/README.md b/docs/observability/README.md new file mode 100644 index 00000000..5a281c9c --- /dev/null +++ b/docs/observability/README.md @@ -0,0 +1,610 @@ +# Managing Observability on Kubernetes for Oracle Databases + +Oracle Database Operator for Kubernetes (`OraOperator`) includes the +Observability controller for Oracle Databases and adds the `DatabaseObserver` CRD, which enables users to observe +Oracle Databases by scraping database metrics using SQL queries and observe logs in the Database _alert.log_. The controller +automates the deployment and maintenance of the metrics exporter container image, +metrics exporter service and Prometheus servicemonitor. + +The following sections explains the configuration and functionality +of the controller. 
+ +* [Prerequisites](#prerequisites) +* [The DatabaseObserver Custom Resource Definition](#the-databaseobserver-custom-resource) + * [Configuration Options](#configuration-options) + * [Resources Managed by the Controller](#resources-managed-by-the-controller) +* [DatabaseObserver Operations](#databaseobserver-operations) + * [Create](#create-resource) + * [List](#list-resource) + * [Get Status](#get-detailed-status) + * [Update](#patch-resource) + * [Delete](#delete-resource) +* [Configuration Options for Scraping Metrics](#scraping-metrics) + * [Custom Metrics Config](#custom-metrics-config) + * [Prometheus Release](#prometheus-release) +* [Configuration Options for Scraping Logs](#scraping-logs) + * [Custom Log Location with PersistentVolumes](#custom-log-location-with-persistentvolumes) + * [Example Working with Sidecars and Promtail](#working-with-sidecars-to-deploy-promtail) + * [Promtail Config Example](#Promtail-Config-Example) +* [Other Configuration Options](#other-configuration-options) + * [Labels](#labels) + * [Custom Exporter Image or Version](#custom-exporter-image-or-version) +* [Mandatory Roles and Privileges](#mandatory-roles-and-privileges-requirements-for-observability-controller) +* [Debugging and troubleshooting](#debugging-and-troubleshooting) +* [Known Issues](#known-issues) + +## Prerequisites +The `DatabaseObserver` custom resource has the following prerequisites: + +1. Prometheus and its `servicemonitor` custom resource definition must be installed on the cluster. + +- The Observability controller creates multiple Kubernetes resources that include + a Prometheus `servicemonitor`. For the controller + to create ServiceMonitors, the ServiceMonitor custom resource must exist. + +2. A preexisting Oracle Database and the proper database grants and privileges. + +- The controller exports metrics through SQL queries that the user can control + and specify through a _toml_ file. 
The necessary access privileges to the tables used in the queries + are not provided and applied automatically. + +## The DatabaseObserver Custom Resource +The Oracle Database Operator (__v1.2.0__ or later) includes the Oracle Database Observability controller, which automates +the deployment and setting up of the Oracle Database exporter and the related resources to make Oracle Databases observable. + +In the example YAML file found in +[`./config/samples/observability/v4/databaseobserver.yaml`](../../config/samples/observability/v4/databaseobserver.yaml), +the databaseObserver custom resource provides the following configurable properties: + +| Attribute | Type | Default | Required? | Example | +|--------------------------------------------------------|--------|---------------------------------------------------------------------|:------------|-----------------------------------------------------------------------| +| `spec.database.dbUser.key` | string | user | Optional | _username_ | +| `spec.database.dbUser.secret` | string | - | Yes | _db-secret_ | +| `spec.database.dbPassword.key` | string | password | Optional | _admin-password_ | +| `spec.database.dbPassword.secret` | string | - | Conditional | _db-secret_ | +| `spec.database.dbPassword.vaultOCID` | string | - | Conditional | _ocid1.vault.oc1..._ | +| `spec.database.dbPassword.vaultSecretName` | string | - | Conditional | _db-vault_ | +| `spec.database.dbWallet.secret` | string | - | Conditional | _devsec-oradevdb-wallet_ | +| `spec.database.dbConnectionString.key` | string | connection | Optional | _connection_ | +| `spec.database.dbConnectionString.secret` | string | - | Yes | _db-secretg_ | +| `spec.sidecars` | array | - | Optional | - | +| `spec.sidecarVolumes` | array | - | Optional | - | +| `spec.exporter.deployment.securityContext` | object | | Optional | _ | +| `spec.exporter.deployment.env` | map | - | Optional | _DB_ROLE: "SYSDBA"_ | +| `spec.exporter.deployment.image` | string | 
container-registry.oracle.com/database/observability-exporter:1.5.1 | Optional | _container-registry.oracle.com/database/observability-exporter:1.3.0_ | +| `spec.exporter.deployment.args` | array | - | Optional | _[ "--log.level=info" ]_ | +| `spec.exporter.deployment.commands` | array | - | Optional | _[ "/oracledb_exporter" ]_ | +| `spec.exporter.deployment.labels` | map | - | Optional | _environment: dev_ | +| `spec.exporter.deployment.podTemplate.labels` | map | - | Optional | _environment: dev_ | +| `spec.exporter.deployment.podTemplate.securityContext` | object | - | Optional | _ | +| `spec.exporter.service.ports` | array | - | Optional | - | +| `spec.exporter.service.labels` | map | - | Optional | _environment: dev_ | | +| `spec.configuration.configMap.key` | string | config.toml | Optional | _config.toml_ | +| `spec.configuration.configMap.name` | string | - | Optional | _devcm-oradevdb-config_ | +| `spec.prometheus.serviceMonitor.labels` | map | - | Yes | _release: prometheus_ | +| `spec.prometheus.serviceMonitor.namespaceSelector` | - | - | Yes | - | +| `spec.prometheus.serviceMonitor.endpoints` | array | - | Optional | - | +| `spec.log.filename` | string | alert.log | Optional | _alert.log_ | +| `spec.log.path` | string | /log | Optional | _/log_ | +| `spec.log.volume.name` | string | log-volume | Optional | _my-persistent-volume_ | +| `spec.log.volume.persistentVolumeClaim.claimName` | string | - | Optional | _my-pvc_ | +| `spec.replicas` | number | 1 | Optional | _1_ | +| `spec.inheritLabels` | array | - | Optional | _- environment: dev_
- app.kubernetes.io/name: observer | +| `spec.ociConfig.configMapName` | string | - | Conditional | _oci-cred_ | +| `spec.ociConfig.secretName` | string | - | Conditional | _oci-privatekey_ | + + +### Configuration Options +The `databaseObserver` Custom resource has the following fields for all configurations that are required: +* `spec.database.dbUser.secret` - Secret containing the database username. The corresponding key can be any value but must match the key in the secret provided. +* `spec.database.dbPassword.secret` - Secret containing the database password (if `vault` is NOT used). The corresponding key field can be any value, but must match the key in the Secret provided +* `spec.database.dbConnectionString.secret` - Secret containing the database connection string. The corresponding key field can be any value but must match the key in the Secret provided +* `spec.prometheus.serviceMonitor.labels` - Custom labels to add to the service monitors labels. A label is required for your serviceMonitor to be discovered. This label must match what is set in the serviceMonitorSelector of your Prometheus configuration + +If a database wallet is required to connect, then the following field containing the wallet secret is required: +* `spec.database.dbWallet.secret` - Secret containing the database wallet. The filenames inside the wallet must be used as keys + +If vault is used to store the database password instead, then the following fields are required: +* `spec.database.dbPassword.vaultOCID` - OCID of the vault used +* `spec.database.dbPassword.vaultSecretName` - Name of the secret inside the desired vault +* `spec.ociConfig.configMapName` - Holds the rest of the information of the OCI API signing key. The following keys must be used: `fingerprint`, `region`, `tenancy` and `user` +* `spec.ociConfig.secretName` - Holds the private key of the OCI API signing key. 
The key to the file containing the user private key must be: `privatekey` + +The `databaseObserver` Resource provides the remaining multiple fields that are optional: +* `spec.prometheus.serviceMonitor.endpoints` - ServiceMonitor endpoints +* `spec.prometheus.serviceMonitor.namespaceSelector` - ServiceMonitor namespace selector +* `spec.sidecars` - List of containers to run as a sidecar container with the observability exporter container image +* `spec.sidecarVolumes` - Volumes of any sidecar containers +* `spec.log.path` - Custom path to create +* `spec.log.filename` - Custom filename for the log file +* `spec.log.volume.name` - Custom name for the log volume +* `spec.log.volume.persistentVolumeClaim.claimName` - A volume in which to place the log to be shared by the containers. If not specified, an EmptyDir is used by default. +* `spec.configuration.configMap.key` - Configuration filename inside the container and the configmap +* `spec.configuration.configMap.name` - Name of the `configMap` that holds the custom metrics configuration +* `spec.replicas` - Number of replicas to deploy +* `spec.exporter.service.ports` - Port number for the generated service to use +* `spec.exporter.service.labels` - Custom labels to add to service labels +* `spec.exporter.deployment.image` - Image version of observability exporter to use +* `spec.exporter.deployment.env` - Custom environment variables for the observability exporter +* `spec.exporter.deployment.labels` - Custom labels to add to deployment labels +* `spec.exporter.deployment.podTemplate.labels` - Custom labels to add to pod labels +* `spec.exporter.deployment.podTemplate.securityContext` - Configures pod securityContext +* `spec.exporter.deployment.args` - Additional arguments to provide the observability-exporter +* `spec.exporter.deployment.commands` - Commands to supply to the observability-exporter +* `spec.exporter.deployment.securityContext` - Configures container securityContext +* `spec.inheritLabels` - Keys 
of inherited labels from the databaseObserver resource. These labels are applied to generated resources. + +### Resources Managed by the Controller +When you create a `DatabaseObserver` resource, the controller creates and manages the following resources: + +1. __Deployment__ - The deployment will have the same name as the `databaseObserver` resource + - Deploys a container named `observability-exporter` + - The default container image version of the `container-registry.oracle.com/database/observability-exporter` supported is __[v1.5.1](https://github.com/oracle/oracle-db-appdev-monitoring/releases/tag/1.5.1)__ + +2. __Service__ - The service will have the same name as the databaseObserver + - The service is of type `ClusterIP` + +3. __Prometheus ServiceMonitor__ - The serviceMonitor will have the same name as the `databaseObserver` + +## DatabaseObserver Operations +### Create Resource +Follow the steps below to create a new `databaseObserver` resource object. + +1. To begin, creating a `databaseObserver` requires you to create and provide Kubernetes Secrets to provide connection details: +```bash +kubectl create secret generic db-secret \ + --from-literal=username='username' \ + --from-literal=password='password_here' \ + --from-literal=connection='dbsample_tp' +``` + +2. (Conditional) Create a Kubernetes Secret for the wallet (if a wallet is required to connect to the database). + +You can create this Secret by using a command similar to the example that follows. +If you are connecting to an Autonomous Database, and the operator is used to manage the Oracle Autonomous Database, then a client wallet can also be downloaded as a Secret through `kubectl` commands. See the ADB README section on [Download Wallets](../../docs/adb/README.md#download-wallets). + +You can also choose to create the wallet secret from a local directory containing the wallet files: +```bash +kubectl create secret generic db-wallet --from-file=wallet_dir +``` + +3.
Finally, update the `databaseObserver` manifest with the resources you have created. You can use the example _minimal_ manifest +inside [config/samples/observability/v4](../../config/samples/observability/v4/databaseobserver_minimal.yaml) to specify and create your databaseObserver object with a +YAML file. + +```YAML +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: db-wallet + + prometheus: + serviceMonitor: + labels: + release: prometheus +``` + +```bash + kubectl apply -f databaseobserver.yaml +``` + +### List Resource +To list the Observability custom resources, use the following command as an example: +```bash +kubectl get databaseobserver -A +``` + +### Get Detailed Status +To obtain a quick status, use the following command as an example: + +> Note: The databaseobserver custom resource is named `obs-sample` in the next following sections. +> We will use this name as an example. + +```sh +$ kubectl get databaseobserver obs-sample +NAME EXPORTERCONFIG STATUS VERSION +obs-sample DEFAULT READY 1.5.1 +``` + + +To obtain a more detailed status, use the following command as an example: + +```bash +kubectl describe databaseobserver obs-sample +``` + +This command displays details of the current state of your `databaseObserver` resource object. A successful +deployment of the `databaseObserver` resource object should display `READY` as the status, and all conditions should display with a `True` value for every ConditionType. + + +### Patch Resource +The Observability controller currently supports updates for most of the fields in the manifest. 
The following is an example of patching the `databaseObserver` resource: +```bash +kubectl --type=merge -p '{"spec":{"exporter":{"image":"container-registry.oracle.com/database/observability-exporter:1.5.0"}}}' patch databaseobserver obs-sample +``` + +### Delete Resource + +To delete the `databaseObserver` custom resource and all related resources, use this command: + +```bash +kubectl delete databaseobserver obs-sample +``` + +## Scraping Metrics +The `databaseObserver` resource deploys the Observability exporter container. This container connects to an Oracle Database and +scrapes metrics using SQL queries. By default, the exporter provides standard metrics, which are listed in the [official GitHub page of the Observability Exporter](https://github.com/oracle/oracle-db-appdev-monitoring?tab=readme-ov-file#standard-metrics). + +To define custom metrics in Oracle Database for scraping, a TOML file that lists your custom queries and properties is required. +The file will have metric sections with the following parts: +- a context +- a request, which contains the SQL query +- a map between the field(s) in the request and comment(s) + +For example, the code snippet that follows shows how you can define custom metrics: +```toml +[[metric]] +context = "test" +request = "SELECT 1 as value_1, 2 as value_2 FROM DUAL" +metricsdesc = { value_1 = "Simple example returning always 1.", value_2 = "Same but returning always 2." } +``` +This file produces the following entries: +``` +# HELP oracledb_test_value_1 Simple example returning always 1. +# TYPE oracledb_test_value_1 gauge +oracledb_test_value_1 1 +# HELP oracledb_test_value_2 Same but returning always 2. +# TYPE oracledb_test_value_2 gauge +oracledb_test_value_2 2 +``` + +You can find more information in the [__Custom Metrics__](https://github.com/oracle/oracle-db-appdev-monitoring?tab=readme-ov-file#custom-metrics) section of the Official GitHub page.
+ + + +### Custom Metrics Config +When configuring a `databaseObserver` resource, you can use the field `spec.configuration.configMap` to provide a +custom metrics file as a `configMap`. + +You can create the `configMap` by running the following command: +```bash +kubectl create cm custom-metrics-cm --from-file=metrics.toml +``` + +Finally, when creating or updating a `databaseObserver` resource, if we assume using the example above, you can set the fields in your YAML file as follows: +```yaml +spec: + configuration: + configMap: + key: "metrics.toml" + name: "custom-metrics-cm" +``` + +### Prometheus Release +To enable your Prometheus configuration to find and include the `ServiceMonitor` created by the `databaseObserver` resource, the field `spec.prometheus.serviceMonitor.labels` is an __important__ and __required__ field. The label on the ServiceMonitor +must match the `spec.serviceMonitorSelector` field in your Prometheus configuration. + +```yaml + prometheus: + serviceMonitor: + labels: + release: stable +``` + +## Scraping Logs +Currently, the observability exporter provides the `alert.log` from Oracle Database, which provides important information about errors and exceptions during database operations. + +By default, the logs are stored in the pod filesystem, inside `/log/alert.log`. Note that the log can also be placed in a custom path with a custom filename. You can also place a volume available to multiple pods with the use of `PersistentVolumes` by specifying a `persistentVolumeClaim`. +Because the logs are stored in a file, the logs must be pushed to a log aggregation system, such as _Loki_. +In the following example, `Promtail` is used as a sidecar container that ships the contents of local logs to the Loki instance.
+ + +To configure the `databaseObserver` resource with a sidecar, two fields can be used: +```yaml +spec: + sidecars: [] + sidecarVolumes: [] +``` + +You can find an example in the `samples` directory, which deploys a Promtail sidecar container as an example: +[`config/samples/observability/v4/databaseobserver_logs_promtail.yaml`](../../config/samples/observability/v4/databaseobserver_logs_promtail.yaml) + +### Custom Log Location with PersistentVolumes + +The fields `spec.log.filename` and `spec.log.path` enable you to configure a custom location and filename for the log. +Using a custom location enables you to control where to place the logfile, such as a `persistentVolume`. + +```yaml + log: + filename: "alert.log" + path: "/log" +``` + +To configure the `databaseObserver` resource to put the log file in a `persistentVolume`, you can set the following fields +in your `databaseObserver` YAML file. The field `spec.log.volume.name` is provided to control the name of the volume used +for the log, while the field `spec.log.volume.persistentVolumeClaim.claimName` is used to specify the claim to use. +These details can be used with any sidecar containers, or with other containers. + +If `spec.log.volume.persistentVolumeClaim.claimName` is not specified, then an `EmptyDir` volume is automatically used. + +> Important Note: the volume name must match all references of the volume, such as in any sidecar containers that use and mount this volume. + +```yaml + log: + volume: + name: my-log-volume + persistentVolumeClaim: + claimName: "my-pvc" +``` + +The security context defines privilege and access control settings for a pod container. If these privileges and access control settings need to be updated in the pod, then the same field is available on the `databaseObserver` spec. You can set this object under deployment: `spec.exporter.deployment.securityContext`.
+ +```yaml +spec: + exporter: + deployment: + securityContext: + runAsUser: 1000 +``` + +Configuring security context under the PodTemplate is also possible. You can set this object under: `spec.exporter.deployment.podTemplate.securityContext` + +```yaml +spec: + exporter: + deployment: + podTemplate: + securityContext: + supplementalGroups: [1000] +``` + + +### Working with Sidecars to deploy Promtail +The fields `spec.sidecars` and `spec.sidecarVolumes` provide the ability to deploy container images as a sidecar container +alongside the `observability-exporter` container. + +You can specify container images to deploy inside `spec.sidecars` as you would normally define a container in a deployment. The field +`spec.sidecars` is of an array of containers (`[]corev1.Container`). + +For example, to deploy a Grafana Promtail image, you can specify the container and its details as an element to the array, `spec.sidecars`. +```yaml + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/config.yaml + volumeMounts: + - name: promtail-config-volume + mountPath: /etc/promtail + - name: my-log-volume + mountPath: /log +``` + +> Important Note: Make sure the volumeMount name matches the actual name of the volumes referenced. In this case, `my-log-volume` is referenced in `spec.log.volume.name`. + +In the field `spec.sidecarVolumes`, you can specify and list the volumes you need in your sidecar containers. The field +`spec.sidecarVolumes` is an array of Volumes (`[]corev1.Volume`). + +For example, when deploying the Promtail container, you can specify in the field any volume that needs to be mounted in the sidecar container above.
+ +```yaml + sidecarVolumes: + - name: promtail-config-volume + configMap: + name: promtail-config-file +``` + +In this example, the `promtail-config-file` `configMap` contains the Promtail configuration, which specifies where to find +the target and the path to the file, as well as the endpoint where Loki is listening for any push API requests. + +__Promtail Config Example__ + +```yaml +# config.yaml +server: + http_listen_port: 9080 + grpc_listen_port: 0 +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://{loki-endpoint}:3100/loki/api/v1/push + +scrape_configs: + - job_name: "alert-log" + static_configs: + - targets: + - localhost + labels: + app: {my-database-observer-label} + __path__: /log/*.log + ``` + +To create the `configmap`, you can run the following command: +```bash +kubectl create cm promtail-config-file --from-file=config.yaml +``` + + +## Other Configuration Options + +### Labels + +__About the Default Label__ - The resources created by the Observability Controller will automatically be labelled with: +- `app`: `` + + +For example, if the `databaseObserver` instance is named: `metrics-exporter`, then resources such as the deployment will be labelled +with `app: metrics-exporter`. This label `cannot be overwritten` as this label is used by multiple resources created. Selectors used by the deployment, service and servicemonitor use this label. + +The following configuration shows an example: + +```yaml +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: metrics-exporter + labels: + app: my-db-metrics + some: non-inherited-label +spec: + + # inheritLabels + inheritLabels: + - some + + # ... +``` + +Meanwhile, you can provide extra labels to the resources created by the `databaseObserver` controller, such as the Deployment, Pods, Service and ServiceMonitor. 
+```yaml +spec: + exporter: + deployment: + labels: + podTemplate: + labels: + service: + labels: + prometheus: + serviceMonitor: + labels: +``` + +### Custom Exporter Image or Version +The field `spec.exporter.deployment.image` is provided to enable you to make use of a newer or older version of the [observability-exporter](https://github.com/oracle/oracle-db-appdev-monitoring) +container image. + +```yaml +spec: + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.3" +``` + +### Custom Environment Variables, Arguments and Commands +The fields `spec.exporter.deployment.env`, `spec.exporter.deployment.args` and `spec.exporter.deployment.commands` are provided for adding custom environment variables, arguments (`args`) and commands to the containers. +Any custom environment variable will overwrite environment variables set by the controller. + +```yaml +spec: + exporter: + deployment: + env: + DB_ROLE: "" + TNS_ADMIN: "" + args: + - "--log.level=info" + commands: + - "/oracledb_exporter" +``` + + +### Custom Service Ports +The field `spec.exporter.service.ports` is provided to enable setting the ports of the service. If not set, then the following definition is set by default. + +```yaml +spec: + exporter: + service: + ports: + - name: metrics + port: 9161 + targetPort: 9161 + +``` + +### Custom ServiceMonitor Endpoints +The field `spec.prometheus.serviceMonitor.endpoints` is provided for providing custom endpoints for the ServiceMonitor resource created by the `databaseObserver`: + +```yaml +spec: + prometheus: + serviceMonitor: + endpoints: + - bearerTokenSecret: + key: '' + interval: 20s + port: metrics + relabelings: + - action: replace + sourceLabels: + - __meta_kubernetes_endpoints_label_app + targetLabel: instance +``` + +## Mandatory roles and privileges requirements for Observability Controller + +The Observability controller issues the following policy rules for the following resources. 
Besides +databaseobserver resources, the controller manages its own service, deployment, pods and servicemonitor +and gets and lists configmaps and secrets. + +| Resources | Verbs | +|-------------------------------------------------------|-------------------------------------------| +| services | create delete get list patch update watch | +| deployments | create delete get list patch update watch | +| pods | create delete get list patch update watch | +| events | create delete get list patch update watch | +| services.apps | create delete get list patch update watch | +| deployments.apps | create delete get list patch update watch | +| pods.apps | create delete get list patch update watch | +| servicemonitors.monitoring.coreos.com | create delete get list patch update watch | +| databaseobservers.observability.oracle.com | create delete get list patch update watch | +| databaseobservers.observability.oracle.com/status | get patch update | +| configmaps | get list | +| secrets | get list | +| configmaps.apps | get list | +| databaseobservers.observability.oracle.com/finalizers | update | + +## Debugging and troubleshooting + +### Show the details of the resource +To obtain the verbose output of the current spec, use the following command: + +```sh +kubectl describe databaseobserver/database-observer-sample +``` + +If any error occurs during the reconciliation loop, then the Operator either reports +the error using the resource's event stream, or it will show the error under conditions. + +### Check the logs of the pod where the operator deploys +Follow these steps to check the logs. + +1. List the pod replicas + + ```sh + kubectl get pods -n oracle-database-operator-system + ``` + +2. 
Use the following command to check the logs of the deployment

   ```sh
   kubectl logs deployment.apps/oracle-database-operator-controller-manager -n oracle-database-operator-system
   ```

## Known Potential Issues

| Issue | Example error | Potential Workaround |
|---------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
| Pod may encounter error Permission denied when creating log file. Pod cannot access file system due to insufficient permissions | ```level=error msg="Failed to create the log file: /log/alert.log"``` | Configure securityContext in the spec, add your group ID to the `supplementalgroups` inside `spec.exporter.deployment.podTemplate.securityContext` field. |


## Resources
- [GitHub - Unified Observability for Oracle Database Project](https://github.com/oracle/oracle-db-appdev-monitoring)
diff --git a/docs/ordsservices/README.md b/docs/ordsservices/README.md
new file mode 100644
index 00000000..e2fa97be
--- /dev/null
+++ b/docs/ordsservices/README.md
@@ -0,0 +1,98 @@
+# Oracle REST Data Services (ORDSSRVS) Controller for Kubernetes - ORDS Life cycle management
+
+
+## Description
+
+The ORDSSRVS controller extends the Kubernetes API with a Custom Resource (CR) and Controller for automating Oracle REST Data
+Services (ORDS) lifecycle management. Using the ORDS controller, you can easily migrate existing, or create new, ORDS implementations
+into an existing Kubernetes cluster.
+
+This controller allows you to run what would otherwise be an On-Premises ORDS middle-tier, configured as you require, inside Kubernetes with the additional ability of the controller to perform automatic ORDS/APEX install/upgrades inside the database.
+
+## Features Summary
+
+The custom RestDataServices resource supports the following configurations as a Deployment, StatefulSet, or DaemonSet:
+
+* Single OrdsSrvs resource with one database pool
+* Single OrdsSrvs resource with multiple database pools*
+* Multiple OrdsSrvs resources, each with one database pool
+* Multiple OrdsSrvs resources, each with multiple database pools*
+
+*See [Limitations](#limitations)
+
+It supports the majority of ORDS configuration settings as per the [API Documentation](./api.md).
+
+The ORDS and APEX schemas can be [automatically installed/upgraded](./autoupgrade.md) into the Oracle Database by the ORDS controller.
+
+ORDS Version support:
+* 24.1.1
+(Newer versions of ORDS will be supported in the next update of OraOperator)
+
+Oracle Database Version:
+* 19c
+* 23ai (incl. 23ai Free)
+
+### Prerequisites
+
+1. Oracle Database Operator
+
+   Install the Oracle Database Operator (OraOperator) using the instructions in the [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file.
+
+1. Namespace
+
+   For a dedicated namespace deployment of the ORDSSRVS controller, refer to the "Namespace Scoped Deployment" section in the OraOperator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md#2-namespace-scoped-deployment).
+
+   The following examples deploy the controller to the 'ordsnamespace' namespace.
+
+   Create the namespace:
+   ```bash
+   kubectl create namespace ordsnamespace
+   ```
+
+   Apply namespace role binding [ordsnamespace-role-binding.yaml](./examples/ordsnamespace-role-binding.yaml):
+   ```bash
+   kubectl apply -f ordsnamespace-role-binding.yaml
+   ```
+
+   Edit OraOperator to add the namespace under WATCH_NAMESPACE:
+   ```yaml
+   - name: WATCH_NAMESPACE
+     value: "default,ordsnamespace"
+   ```
+
+### Common configuration examples
+
+A few common configuration examples can be used to quickly familiarise yourself with the ORDS Custom Resource Definition.
+The "Conclusion" section of each example highlights specific settings to enable functionality that may be of interest.
+
+Before you begin, review the following examples:
+
+* [Pre-existing Database](./examples/existing_db.md)
+* [Containerised Single Instance Database (SIDB)](./examples/sidb_container.md)
+* [Multidatabase using a TNS Names file](./examples/multi_pool.md)
+* [Autonomous Database using the OraOperator](./examples/adb_oraoper.md) *See [Limitations](#limitations)
+* [Autonomous Database without the OraOperator](./examples/adb.md)
+* [Oracle API for MongoDB Support](./examples/mongo_api.md)
+
+Running through all examples in the same Kubernetes cluster illustrates the ability to run multiple ORDS instances with a variety of different configurations.
+
+If you have a specific use-case that is not covered and would like it to be, feel free to contribute it via a Pull Request.
+
+### Limitations
+
+When connecting to an mTLS enabled ADB and using the OraOperator to retrieve the Wallet, it is currently not supported to have multiple, different databases supported by the single RestDataServices resource. This is due to a requirement to set the `TNS_ADMIN` parameter at the Pod level ([#97](https://github.com/oracle/oracle-database-operator/issues/97)).
+
+### Troubleshooting
+See [Troubleshooting](./TROUBLESHOOTING.md)
+
+## Contributing
+See [Contributing to this Repository](./CONTRIBUTING.md)
+
+## Reporting a Security Issue
+
+See [Reporting security vulnerabilities](./SECURITY.md)
+
+## License
+
+Copyright (c) 2025 Oracle and/or its affiliates.
+Released under the Universal Permissive License v1.0 as shown at [https://oss.oracle.com/licenses/upl/](https://oss.oracle.com/licenses/upl/)
diff --git a/docs/ordsservices/TROUBLESHOOTING.md b/docs/ordsservices/TROUBLESHOOTING.md
new file mode 100644
index 00000000..b1b5304d
--- /dev/null
+++ b/docs/ordsservices/TROUBLESHOOTING.md
@@ -0,0 +1,129 @@
+
+
+
+## TROUBLESHOOTING
+
+### Init container error
+
+Check the pod status and verify the init outcome
+
+----
+*Command:*
+```bash
+kubectl get pods -n <namespace>
+```
+
+*Example:*
+```bash
+kubectl get pods -n ordsnamespace
+NAME                               READY   STATUS                  RESTARTS      AGE
+ords-multi-pool-55db776994-7rrff   0/1     Init:CrashLoopBackOff   6 (61s ago)   12m
+```
+In case of error, identify the *initContainer* name
+
+----
+*Command:*
+```bash
+kubectl get pod <pod> -n <namespace> -o="custom-columns=NAME:.metadata.name,INIT-CONTAINERS:.spec.initContainers[*].name,CONTAINERS:.spec.containers[*].name"
+```
+
+Use the initContainers info to dump log information
+**Command:**
+```bash
+kubectl logs -f --since=0 <pod> -n <namespace> -c <init-container>
+```
+
+*Example:*
+
+In this particular case, the wrong credentials were provided: the "SYT" user does not exist
+
+```text
+kubectl logs -f --since=0 ords-multi-pool-55db776994-m7782 -n ordsnamespace -c ords-multi-pool-init
+
+[..omissis...]
+Running SQL...
+Picked up JAVA_TOOL_OPTIONS: -Doracle.ml.version_check=false +BACKTRACE [24:09:17 08:59:03] + +filename:line function +------------- -------- +/opt/oracle/sa/bin/init_script.sh:115 run_sql +/opt/oracle/sa/bin/init_script.sh:143 check_adb +/opt/oracle/sa/bin/init_script.sh:401 main +SQLERROR: + USER = SYT + URL = jdbc:oracle:thin:@PDB2 + Error Message = 🔥ORA-01017: invalid username/password;🔥 logon denied +Pool: pdb2, Exit Code: 1 +Pool: pdb1, Exit Code: 1 +``` + +--- +*Diag shell* Use the following script to dump the container init log + +```bash +#!/bin/bash +NAMESPACE=${1:-"ordsnamespace"} +KUBECTL=/usr/bin/kubectl +for _pod in `${KUBECTL} get pods --no-headers -o custom-columns=":metadata.name" --no-headers -n ${NAMESPACE}` +do + for _podinit in `${KUBECTL} get pod ${_pod} -n ${NAMESPACE} -o="custom-columns=INIT-CONTAINERS:.spec.initContainers[*].name" --no-headers` + do + echo "DUMPINIT ${_pod}:${_podinit}" + ${KUBECTL} logs -f --since=0 ${_pod} -n ${NAMESPACE} -c ${_podinit} + done +done +``` + +## Ords init error + +Get pod name + +*Command:* +```bash +kubectl get pods -n +``` + +*Example:* +``` +kubectl get pods -n ordsnamespace +NAME READY STATUS RESTARTS AGE +ords-multi-pool-55db776994-m7782 1/1 Running 0 2m51s +``` +---- +Dump ords log + +*Commands:* +```bash +kubectl logs --since=0 -n +``` +*Example:* +```text +kubectl logs --since=0 ords-multi-pool-55db776994-m7782 -n ordsnamespace +[..omissis..] +2024-09-17T09:47:39.227Z WARNING The pool named: |pdb2|lo| is invalid and will be ignored: ORDS was unable to make a connection to the database. The database user specified by db.username configuration setting is locked. The connection pool named: |pdb2|lo| had the following error(s): 🔥ORA-28000: The account is locked.🔥 + +2024-09-17T09:47:39.370Z WARNING The pool named: |pdb1|lo| is invalid and will be ignored: ORDS was unable to make a connection to the database. The database user specified by db.username configuration setting is locked. 
The connection pool named: |pdb1|lo| had the following error(s): 🔥ORA-28000: The account is locked.🔥 + +2024-09-17T09:47:39.375Z INFO + +Mapped local pools from /opt/oracle/sa/config/databases: + /ords/pdb1/ => pdb1 => INVALID + /ords/pdb2/ => pdb2 => INVALID + + +2024-09-17T09:47:39.420Z INFO Oracle REST Data Services initialized +Oracle REST Data Services version : 24.1.1.r1201228 +Oracle REST Data Services server info: jetty/10.0.20 +Oracle REST Data Services java info: Java HotSpot(TM) 64-Bit Server VM 11.0.15+8-LTS-149 +``` + +*Solution:* Connect to the container db to unlock the account + +```sql +alter user ORDS_PUBLIC_USER account unlock; +``` + + + + diff --git a/docs/ordsservices/api.md b/docs/ordsservices/api.md new file mode 100644 index 00000000..da4db09c --- /dev/null +++ b/docs/ordsservices/api.md @@ -0,0 +1,1388 @@ +# API Reference + +Packages: + +- [database.oracle.com/v1](#databaseoraclecomv1) + +# database.oracle.com/v1 + +Resource Types: + +- [OrdsSrvs](#ordssrvs) + + + + +## OrdsSrvs +[↩ Parent](#databaseoraclecomv1 ) + + + + + + +OrdsSrvs is the Schema for the ordssrvs API + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
apiVersionstringdatabase.oracle.com/v1true
kindstringOrdsSrvstrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobject + OrdsSrvsSpec defines the desired state of OrdsSrvs
+
false
statusobject + OrdsSrvsStatus defines the observed state of OrdsSrvs
+
false
+ + +### OrdsSrvs.spec +[↩ Parent](#ordssrvs) + + + +OrdsSrvsSpec defines the desired state of OrdsSrvs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
globalSettingsobject Contains settings that are configured across the entire +ORDS instance.
+
true
imagestring Specifies the ORDS container image
+
true
forceRestartboolean Specifies whether to restart pods when Global or Pool +configurations change
+
false
imagePullPolicyenum Specifies the ORDS container image pull policy
+
+Enum: IfNotPresent, Always, Never
+Default: IfNotPresent
+
false
imagePullSecretsstring Specifies the Secret Name for pulling the ORDS container +image
+
false
poolSettings< +a>[]object Contains settings for individual pools/databases
+
false
replicasinteger Defines the number of desired Replicas when workloadType +Deployment or StatefulSet
+
+Format: int32
+Default: 1
+Minimum: 1
+
false
workloadTypeenum Specifies the desired Kubernetes Workload
+
+Enum: Deployment, StatefulSet, DaemonSet
+Default: Deployment
+
false
encPrivKey
+
secret
+
secretName: string  passwordKey: +string Define the private key to decrypt passwords
+
true
+
+ +### OrdsSrvs.spec.globalSettings +[↩ Parent](#ordssrvsspec) + + + +Contains settings that are configured across the entire ORDS instance. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
cache.metadata.enabledboolean + Specifies the setting to enable or disable metadata caching.
+
false
cache.metadata.graphql.expireAfterAccessinteger + Specifies the duration after a GraphQL schema is not accessed from the cache that it expires.
+
+ Format: int64
+
false
cache.metadata.graphql.expireAfterWriteinteger + Specifies the duration after a GraphQL schema is cached that it expires and has to be loaded again.
+
+ Format: int64
+
false
cache.metadata.jwks.enabledboolean + Specifies the setting to enable or disable JWKS caching.
+
false
cache.metadata.jwks.expireAfterAccessinteger + Specifies the duration after a JWK is not accessed from the cache that it expires. By default this is disabled.
+
+ Format: int64
+
false
cache.metadata.jwks.expireAfterWriteinteger + Specifies the duration after a JWK is cached, that is, it expires and has to be loaded again.
+
+ Format: int64
+
false
cache.metadata.jwks.initialCapacityinteger + Specifies the initial capacity of the JWKS cache.
+
+ Format: int32
+
false
cache.metadata.jwks.maximumSizeinteger + Specifies the maximum capacity of the JWKS cache.
+
+ Format: int32
+
false
cache.metadata.timeoutinteger + Specifies the setting to determine for how long a metadata record remains in the cache. Longer duration means, it takes longer to view the applied changes. The formats accepted are based on the ISO-8601 duration format.
+
+ Format: int64
+
false
certSecretobject + Specifies the Secret containing the SSL Certificates Replaces: standalone.https.cert and standalone.https.cert.key
+
false
database.api.enabledboolean + Specifies whether the Database API is enabled.
+
false
database.api.management.services.disabledboolean + Specifies to disable the Database API administration related services. Only applicable when Database API is enabled.
+
false
db.invalidPoolTimeoutinteger + Specifies how long to wait before retrying an invalid pool.
+
+ Format: int64
+
false
debug.printDebugToScreenboolean + Specifies whether to display error messages on the browser.
+
false
enable.mongo.access.logboolean + Specifies if HTTP request access logs should be enabled If enabled, logs will be written to /opt/oracle/sa/log/global
+
+ Default: false
+
false
enable.standalone.access.logboolean + Specifies if HTTP request access logs should be enabled If enabled, logs will be written to /opt/oracle/sa/log/global
+
+ Default: false
+
false
error.responseFormatstring + Specifies how the HTTP error responses must be formatted. html - Force all responses to be in HTML format json - Force all responses to be in JSON format auto - Automatically determines most appropriate format for the request (default).
+
false
feature.grahpql.max.nesting.depthinteger + Specifies the maximum join nesting depth limit for GraphQL queries.
+
+ Format: int32
+
false
icap.portinteger + Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. Either icap.port or icap.secure.port are required to have a value.
+
+ Format: int32
+
false
icap.secure.portinteger + Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. Either icap.port or icap.secure.port are required to have a value. If values for both icap.port and icap.secure.port are provided, then the value of icap.port is ignored.
+
+ Format: int32
+
false
icap.serverstring + Specifies the Internet Content Adaptation Protocol (ICAP) server name or IP address to virus scan files. The icap.server is required to have a value.
+
false
log.procedureboolean + Specifies whether procedures are to be logged.
+
false
mongo.enabledboolean + Specifies to enable the API for MongoDB.
+
false
mongo.idle.timeoutinteger + Specifies the maximum idle time for a Mongo connection in milliseconds.
+
+ Format: int64
+
false
mongo.op.timeoutinteger + Specifies the maximum time for a Mongo database operation in milliseconds.
+
+ Format: int64
+
false
mongo.portinteger + Specifies the API for MongoDB listen port.
+
+ Format: int32
+ Default: 27017
+
false
request.traceHeaderNamestring + Specifies the name of the HTTP request header that uniquely identifies the request end to end as it passes through the various layers of the application stack. In Oracle this header is commonly referred to as the ECID (Entity Context ID).
+
false
security.credentials.attemptsinteger + Specifies the maximum number of unsuccessful password attempts allowed. Enabled by setting a positive integer value.
+
+ Format: int32
+
false
security.credentials.lock.timeinteger + Specifies the period to lock the account that has exceeded maximum attempts.
+
+ Format: int64
+
false
security.disableDefaultExclusionListboolean + If this value is set to true, then the Oracle REST Data Services internal exclusion list is not enforced. Oracle recommends that you do not set this value to true.
+
false
security.exclusionListstring + Specifies a pattern for procedures, packages, or schema names which are forbidden to be directly executed from a browser.
+
false
security.externalSessionTrustedOriginsstring + Specifies to trust Access from originating domains
+
false
security.forceHTTPSboolean + Specifies to force HTTPS; this is set to default to false as in real-world TLS should terminiate at the LoadBalancer
+
false
security.httpsHeaderCheckstring + Specifies that the HTTP Header contains the specified text Usually set to 'X-Forwarded-Proto: https' coming from a load-balancer
+
false
security.inclusionListstring + Specifies a pattern for procedures, packages, or schema names which are allowed to be directly executed from a browser.
+
false
security.maxEntriesinteger + Specifies the maximum number of cached procedure validations. Set this value to 0 to force the validation procedure to be invoked on each request.
+
+ Format: int32
+
false
security.verifySSLboolean + Specifies whether HTTPS is available in your environment.
+
false
standalone.context.pathstring + Specifies the context path where ords is located.
+
+ Default: /ords
+
false
standalone.http.portinteger + Specifies the HTTP listen port.
+
+ Format: int32
+ Default: 8080
+
false
standalone.https.hoststring + Specifies the SSL certificate hostname.
+
false
standalone.https.portinteger + Specifies the HTTPS listen port.
+
+ Format: int32
+ Default: 8443
+
false
standalone.stop.timeoutinteger + Specifies the period for Standalone Mode to wait until it is gracefully shutdown.
+
+ Format: int64
+
false
+ + +### OrdsSrvs.spec.globalSettings.certSecret +[↩ Parent](#ordssrvsspecglobalsettings) + + + +Specifies the Secret containing the SSL Certificates Replaces: standalone.https.cert and standalone.https.cert.key + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
certstring + Specifies the Certificate
+
true
keystring + Specifies the Certificate Key
+
true
secretNamestring + Specifies the name of the certificate Secret
+
true
+ + +### OrdsSrvs.spec.poolSettings[index] +[↩ Parent](#ordssrvsspec) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
db.secretobject + Specifies the Secret with the dbUsername and dbPassword values for the connection.
+
true
poolNamestring + Specifies the Pool Name
+
true
apex.security.administrator.rolesstring + Specifies the comma delimited list of additional roles to assign authenticated APEX administrator type users.
+
false
apex.security.user.rolesstring + Specifies the comma delimited list of additional roles to assign authenticated regular APEX users.
+
false
autoUpgradeAPEXboolean + Specify whether to perform APEX installation/upgrades automatically The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored This setting will be ignored for ADB
+
+ Default: false
+
false
autoUpgradeORDSboolean + Specify whether to perform ORDS installation/upgrades automatically The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored This setting will be ignored for ADB
+
+ Default: false
+
false
db.adminUserstring + Specifies the username for the database account that ORDS uses for administration operations in the database.
+
false
db.adminUser.secretobject + Specifies the Secret with the dbAdminUser (SYS) and dbAdminPassword values for the database account that ORDS uses for administration operations in the database. replaces: db.adminUser.password
+
false
db.cdb.adminUserstring + Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management.
+
false
db.cdb.adminUser.secretobject + Specifies the Secret with the dbCdbAdminUser (SYS) and dbCdbAdminPassword values Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. Replaces: db.cdb.adminUser.password
+
false
db.connectionTypeenum + The type of connection.
+
+ Enum: basic, tns, customurl
+
false
db.credentialsSourceenum + Specifies the source for database credentials when creating a direct connection for running SQL statements. Value can be one of pool or request. If the value is pool, then the credentials defined in this pool is used to create a JDBC connection. If the value request is used, then the credentials in the request is used to create a JDBC connection and if successful, grants the requestor SQL Developer role.
+
+ Enum: pool, request
+
false
db.customURLstring + Specifies the JDBC URL connection to connect to the database.
+
false
db.hostnamestring + Specifies the host system for the Oracle database.
+
false
db.poolDestroyTimeoutinteger + Indicates how long to wait to gracefully destroy a pool before moving to forcefully destroy all connections including borrowed ones.
+
+ Format: int64
+
false
db.portinteger + Specifies the database listener port.
+
+ Format: int32
+
false
db.servicenamestring + Specifies the network service name of the database.
+
false
db.sidstring + Specifies the name of the database.
+
false
db.tnsAliasNamestring + Specifies the TNS alias name that matches the name in the tnsnames.ora file.
+
false
db.usernamestring + Specifies the name of the database user for the connection. For non-ADB this will default to ORDS_PUBLIC_USER For ADBs this must be specified and not ORDS_PUBLIC_USER If ORDS_PUBLIC_USER is specified for an ADB, the workload will fail
+
+ Default: ORDS_PUBLIC_USER
+
false
db.wallet.zip.servicestring + Specifies the service name in the wallet archive for the pool.
+
false
dbWalletSecretobject + Specifies the Secret containing the wallet archive containing connection details for the pool. Replaces: db.wallet.zip
+
false
debug.trackResourcesboolean + Specifies to enable tracking of JDBC resources. If not released causes in resource leaks or exhaustion in the database. Tracking imposes a performance overhead.
+
false
feature.openservicebroker.excludeboolean + Specifies to disable the Open Service Broker services available for the pool.
+
false
feature.sdwboolean + Specifies to enable the Database Actions feature.
+
false
http.cookie.filterstring + Specifies a comma separated list of HTTP Cookies to exclude when initializing an Oracle Web Agent environment.
+
false
jdbc.DriverTypeenum + Specifies the JDBC driver type.
+
+ Enum: thin, oci8
+
false
jdbc.InactivityTimeoutinteger + Specifies how long an available connection can remain idle before it is closed. The inactivity connection timeout is in seconds.
+
+ Format: int32
+
false
jdbc.InitialLimitinteger + Specifies the initial size for the number of connections that will be created. The default is low, and should probably be set higher in most production environments.
+
+ Format: int32
+
false
jdbc.MaxConnectionReuseCountinteger + Specifies the maximum number of times to reuse a connection before it is discarded and replaced with a new connection.
+
+ Format: int32
+
false
jdbc.MaxConnectionReuseTimeinteger + Sets the maximum connection reuse time property.
+
+ Format: int32
+
false
jdbc.MaxLimitinteger + Specifies the maximum number of connections. Might be too low for some production environments.
+
+ Format: int32
+
false
jdbc.MaxStatementsLimitinteger + Specifies the maximum number of statements to cache for each connection.
+
+ Format: int32
+
false
jdbc.MinLimitinteger + Specifies the minimum number of connections.
+
+ Format: int32
+
false
jdbc.SecondsToTrustIdleConnectioninteger + Sets the time in seconds to trust an idle connection to skip a validation test.
+
+ Format: int32
+
false
jdbc.auth.admin.rolestring + Identifies the database role that indicates that the database user must get the SQL Administrator role.
+
false
jdbc.auth.enabledboolean + Specifies if the PL/SQL Gateway calls can be authenticated using database users. If the value is true then this feature is enabled. If the value is false, then this feature is disabled. Oracle recommends not to use this feature. This feature used only to facilitate customers migrating from mod_plsql.
+
false
jdbc.cleanup.modestring + Specifies how a pooled JDBC connection and corresponding database session, is released when a request has been processed.
+
false
jdbc.statementTimeoutinteger + Specifies a timeout period on a statement. An abnormally long running query or script, executed by a request, may leave it in a hanging state unless a timeout is set on the statement. Setting a timeout on the statement ensures that all the queries automatically timeout if they are not completed within the specified time period.
+
+ Format: int32
+
false
misc.defaultPagestring + Specifies the default page to display. The Oracle REST Data Services Landing Page.
+
false
misc.pagination.maxRowsinteger + Specifies the maximum number of rows that will be returned from a query when processing a RESTful service and that will be returned from a nested cursor in a result set. Affects all RESTful services generated through a SQL query, regardless of whether the resource is paginated.
+
+ Format: int32
+
false
owa.trace.sqlboolean + If it is true, then it causes a trace of the SQL statements performed by Oracle Web Agent to be echoed to the log.
+
false
plsql.gateway.modeenum + Indicates if the PL/SQL Gateway functionality should be available for a pool or not. Value can be one of disabled, direct, or proxied. If the value is direct, then the pool serves the PL/SQL Gateway requests directly. If the value is proxied, the PLSQL_GATEWAY_CONFIG view is used to determine the user to whom to proxy.
+
+ Enum: disabled, direct, proxied
+
false
procedure.preProcessstring + Specifies the procedure name(s) to execute prior to executing the procedure specified on the URL. Multiple procedure names must be separated by commas.
+
false
procedure.rest.preHookstring + Specifies the function to be invoked prior to dispatching each Oracle REST Data Services based REST Service. The function can perform configuration of the database session, perform additional validation or authorization of the request. If the function returns true, then processing of the request continues. If the function returns false, then processing of the request is aborted and an HTTP 403 Forbidden status is returned.
+
false
procedurePostProcessstring + Specifies the procedure name(s) to execute after executing the procedure specified on the URL. Multiple procedure names must be separated by commas.
+
false
restEnabledSql.activeboolean + Specifies whether the REST-Enabled SQL service is active.
+
false
security.jwks.connection.timeoutinteger + Specifies the maximum amount of time before timing-out when accessing a JWK url.
+
+ Format: int64
+
false
security.jwks.read.timeoutinteger + Specifies the maximum amount of time reading a response from the JWK url before timing-out.
+
+ Format: int64
+
false
security.jwks.refresh.intervalinteger + Specifies the minimum interval between refreshing the JWK cached value.
+
+ Format: int64
+
false
security.jwks.sizeinteger + Specifies the maximum number of bytes read from the JWK URL.
+
+ Format: int32
+
false
security.jwt.allowed.ageinteger + Specifies the maximum allowed age of a JWT in seconds, regardless of expired claim. The age of the JWT is taken from the JWT issued at claim.
+
+ Format: int64
+
false
security.jwt.allowed.skewinteger + Specifies the maximum skew with which the JWT time claims are accepted. This is useful if the clock on the JWT issuer and ORDS differs by a few seconds.
+
+ Format: int64
+
false
security.jwt.profile.enabledboolean + Specifies whether the JWT Profile authentication is available. Supported values: true, false.
+
false
security.requestAuthenticationFunctionstring + Specifies an authentication function to determine if the requested procedure in the URL should be allowed or disallowed for processing. The function should return true if the procedure is allowed; otherwise, it should return false. If it returns false, Oracle REST Data Services will return WWW-Authenticate in the response header.
+
false
security.requestValidationFunctionstring + Specifies a validation function to determine if the requested procedure in the URL should be allowed or disallowed for processing. The function should return true if the procedure is allowed; otherwise, return false.
+
+ Default: ords_util.authorize_plsql_gateway
+
false
security.validationFunctionTypeenum + Indicates the type of security.requestValidationFunction: javascript or plsql.
+
+ Enum: plsql, javascript
+
false
soda.defaultLimitstring + When using the SODA REST API, specifies the default number of documents returned for a GET request on a collection when a limit is not specified in the URL. Must be a positive integer, or "unlimited" for no limit.
+
false
soda.maxLimitstring + When using the SODA REST API, specifies the maximum number of documents that will be returned for a GET request on a collection URL, regardless of any limit specified in the URL. Must be a positive integer, or "unlimited" for no limit.
+
false
tnsAdminSecretobject + Specifies the Secret containing the TNS_ADMIN directory. Replaces: db.tnsDirectory
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].db.secret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret with the dbUsername and dbPassword values for the connection. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the password Secret
+
true
passwordKeystring + Specifies the key holding the value of the Secret
+
+ Default: password
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].db.adminUser.secret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret with the dbAdminUser (SYS) and dbAdminPassword values for the database account that ORDS uses for administration operations in the database. replaces: db.adminUser.password + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the password Secret
+
true
passwordKeystring + Specifies the key holding the value of the Secret
+
+ Default: password
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].db.cdb.adminUser.secret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret with the dbCdbAdminUser (SYS) and dbCdbAdminPassword values Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. Replaces: db.cdb.adminUser.password + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the password Secret
+
true
passwordKeystring + Specifies the key holding the value of the Secret
+
+ Default: password
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].dbWalletSecret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret containing the wallet archive containing connection details for the pool. Replaces: db.wallet.zip + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the Database Wallet Secret
+
true
walletNamestring + Specifies the Secret key name containing the Wallet
+
true
+ + +### OrdsSrvs.spec.poolSettings[index].tnsAdminSecret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret containing the TNS_ADMIN directory Replaces: db.tnsDirectory + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the TNS_ADMIN Secret
+
true
+ + +### OrdsSrvs.status +[↩ Parent](#ordssrvs) + + + +OrdsSrvsStatus defines the observed state of OrdsSrvs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
restartRequiredboolean + Indicates if the resource is out-of-sync with the configuration
+
true
conditions[]object +
+
false
httpPortinteger + Indicates the HTTP port of the resource exposed by the pods
+
+ Format: int32
+
false
httpsPortinteger + Indicates the HTTPS port of the resource exposed by the pods
+
+ Format: int32
+
false
mongoPortinteger + Indicates the MongoAPI port of the resource exposed by the pods (if enabled)
+
+ Format: int32
+
false
ordsVersionstring + Indicates the ORDS version
+
false
statusstring + Indicates the current status of the resource
+
false
workloadTypestring + Indicates the current Workload type of the resource
+
false
+ + +### OrdsSrvs.status.conditions[index] +[↩ Parent](#ordssrvsstatus) + + + +Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // other fields } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
lastTransitionTimestring + lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+
+ Format: date-time
+
true
messagestring + message is a human readable message indicating details about the transition. This may be an empty string.
+
true
reasonstring + reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+
true
statusenum + status of the condition, one of True, False, Unknown.
+
+ Enum: True, False, Unknown
+
true
typestring + type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+
true
observedGenerationinteger + observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+
+ Format: int64
+ Minimum: 0
+
false
diff --git a/docs/ordsservices/autoupgrade.md b/docs/ordsservices/autoupgrade.md new file mode 100644 index 00000000..fddc30b3 --- /dev/null +++ b/docs/ordsservices/autoupgrade.md @@ -0,0 +1,57 @@ +# AutoUpgrade + +Each pool can be configured to automatically install and upgrade the ORDS and/or APEX schemas in the database. +The ORDS and APEX version is based on the ORDS image used for the RestDataServices resource. + +For example, in the below manifest: +* `Pool: pdb1` is configured to automatically install/ugrade both ORDS and APEX to version 24.1.0 +* `Pool: pdb2` will not install or upgrade ORDS/APEX + +As an additional requirement for `Pool: pdb1`, the `spec.poolSettings.db.adminUser` and `spec.poolSettings.db.adminUser.secret` +must be provided. If they are not, the `autoUpgrade` specification is ignored. + +```yaml +apiVersion: database.oracle.com/v1 +kind: OrdsSrvs +metadata: + name: ordspoc-server +spec: + image: container-registry.oracle.com/database/ords:24.1.0 + forceRestart: true + globalSettings: + database.api.enabled: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + poolSettings: + - poolName: pdb1 + autoUpgradeORDS: true + autoUpgradeAPEX: true + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//localhost:1521/PDB1 + db.secret: + secretName: pdb1-ords-auth + db.adminUser: SYS + db.adminUser.secret: + secretName: pdb1-sys-auth-enc + - poolName: pdb2 + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//localhost:1521/PDB2 + db.secret: + secretName: pdb2-ords-auth-enc +``` + +## Minimum Privileges for Admin User + +The `db.adminUser` must have privileges to create users and objects in the database. For Oracle Autonomous Database (ADB), this could be `ADMIN` while for +non-ADBs this could be `SYS AS SYSDBA`. When you do not want to use `ADMIN` or `SYS AS SYSDBA` to install, upgrade, validate and uninstall ORDS a script is provided +to create a new user to be used. + +1. 
Download the equivalent version of ORDS to the image you will be using. +1. Extract the software and locate: `scripts/installer/ords_installer_privileges.sql` +1. Using SQLcl or SQL*Plus, connect to the Oracle PDB with SYSDBA privileges. +1. Execute the following script providing the database user: + ```sql + @/path/to/installer/ords_installer_privileges.sql privuser + exit + ``` diff --git a/docs/ordsservices/examples/adb.md b/docs/ordsservices/examples/adb.md new file mode 100644 index 00000000..90a21b5c --- /dev/null +++ b/docs/ordsservices/examples/adb.md @@ -0,0 +1,104 @@ +# Example: Autonomous Database without the OraOperator + +This example walks through using the **ORDSSRVS controller** with an Oracle Autonomous Database. + +This assumes that an ADB has already been provisioned and is configured as "Secure Access from Anywhere". +Note that if behind a Proxy, this example will not work as the Wallet will need to be modified to support the proxy configuration. + +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) + +### ADB Wallet Secret + +Download the ADB Wallet and create a Secret, replacing `` with the path to the wallet zip file: + +```bash +kubectl create secret generic adb-wallet \ + --from-file= -n ordsnamespace +``` + +### ADB ADMIN Password Secret + +Create a Secret for the ADB ADMIN password, replacing with the real password: + +```bash +echo ${ADMIN_PASSWORD} > adb-db-auth-enc +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace +openssl rsautl -encrypt -pubin -inkey public.pem -in adb-db-auth-enc |base64 > e_adb-db-auth-enc +kubectl create secret generic adb-oraoper-db-auth-enc --from-file=password=e_adb-db-auth-enc -n ordsnamespace +rm adb-db-auth-enc e_adb-db-auth-enc +``` + +### Create 
RestDataServices Resource + +1. Create a manifest for ORDS. + + As an ADB already maintains ORDS and APEX, `autoUpgradeORDS` and `autoUpgradeAPEX` will be ignored if set. A new DB User for ORDS will be created to avoid conflict with the pre-provisioned one. This user will be + named, `ORDS_PUBLIC_USER_OPER` if `db.username` is either not specified or set to `ORDS_PUBLIC_USER`. + + Replace with the ADB Name and ensure that the `db.wallet.zip.service` is valid for your ADB Workload (e.g. _TP or _HIGH, etc.): + + ```yaml + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs + metadata: + name: ords-adb + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: adb + restEnabledSql.active: true + plsql.gateway.mode: direct + db.wallet.zip.service: _TP + dbWalletSecret: + secretName: adb-wallet + walletName: Wallet_.zip + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER_OPER + db.secret: + secretName: adb-oraoper-db-auth-enc + db.adminUser: ADMIN + db.adminUser.secret: + secretName: adb-oraoper-db-auth-enc + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the restdataservices resource until the status is **Healthy**: + ```bash + kubectl get -n ordsnamespace ordssrvs ords-adb -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. 
+ +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-adb -n ordsnamespace 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords/adb` + +## Conclusion + +This example has a single database pool, named `adb`. It is set to: + +* Not automatically restart when the configuration changes: `forceRestart` is not set. + The pod must be manually resarted for new configurations to be picked-up. +* Automatically install/update ORDS on startup, if required. This occurs due to the database being detected as an ADB. +* Automatically install/update APEX on startup, if required: This occurs due to the database being detected as an ADB. +* The ADB `ADMIN` user will be used to connect the ADB to install APEX/ORDS +* Use the ADB Wallet file to connect to the database: `db.wallet.zip.service: adbpoc_tp` and `dbWalletSecret` \ No newline at end of file diff --git a/docs/ordsservices/examples/adb_oraoper.md b/docs/ordsservices/examples/adb_oraoper.md new file mode 100644 index 00000000..253365c5 --- /dev/null +++ b/docs/ordsservices/examples/adb_oraoper.md @@ -0,0 +1,162 @@ +# Example: Autonomous Database using the OraOperator + +This example walks through using the **ORDS Controller** with a Containerised Oracle Database created by the **ADB Controller** in the same Kubernetes Cluster. + +When connecting to a mTLS enabled ADB while using the OraOperator to retreive the Wallet as is done in the example, it is currently not supported to have multiple, different databases supported by the single Ordssrvs resource. This is due to a requirement to set the `TNS_ADMIN` parameter at the Pod level ([#97](https://github.com/oracle/oracle-database-operator/issues/97)). 
+ +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) + +### Setup Oracle Cloud Authorisation + +In order for the OraOperator to access the ADB, some additional pre-requisites are required, as detailed [here](https://github.com/oracle/oracle-database-operator/blob/main/docs/adb/ADB_PREREQUISITES.md). +Either establish Instance Principles or create the required ConfigMap/Secret. This example uses the later, using the helper script [set_ocicredentials.sh](https://github.com/oracle/oracle-database-operator/blob/main/set_ocicredentials.sh) : + +```bash +./set_ocicredentials.sh run -n ordsnamespace +``` + +### ADB ADMIN Password Secret + +Create a Secret for the ADB Admin password: + +```bash +DB_PWD=$(echo "ORDSpoc_$(date +%H%S%M)") + +kubectl create secret generic adb-oraoper-db-auth \ + -n ordsnamespace \ + --from-literal=adb-oraoper-db-auth=${DB_PWD} +``` + +**NOTE**: When binding to the ADB in a later step, the OraOperator will change the ADB password to what is specified in the Secret. + +### Bind the OraOperator to the ADB + +1. Obtain the OCID of the ADB and set to an environment variable: + + ```bash + export ADB_OCID= + ``` + +1. Create and apply a manifest to bind to the ADB. + "adb-oraoper-tns-admin" secret will be created by the controller. + + ```yaml + apiVersion: database.oracle.com/v4 + kind: AutonomousDatabase + metadata: + name: adb-oraoper + namespace: ordsnamespace + spec: + action: Sync + wallet: + name: adb-oraoper-tns-admin + password: + k8sSecret: + name: adb-oraoper-db-auth + details: + id: $ADB_OCID + ``` + +1. Update the ADMIN Password: + + ```bash + kubectl patch adb adb-oraoper --type=merge \ + -n ordsnamespace \ + -p '{"spec":{"details":{"adminPassword":{"k8sSecret":{"name":"adb-oraoper-db-auth"}}}}}' + ``` + +1. 
Watch the `adb` resource until the STATE is **AVAILABLE**: + + ```bash + kubectl get -n ordsnamespace adb/adb-oraoper -w + ``` + +### Create encrypted password + +```bash +echo ${DB_PWD} > adb-db-auth-enc +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace +openssl rsautl -encrypt -pubin -inkey public.pem -in adb-db-auth-enc |base64 > e_adb-db-auth-enc +kubectl create secret generic adb-oraoper-db-auth-enc --from-file=password=e_adb-db-auth-enc -n ordsnamespace +rm adb-db-auth-enc e_adb-db-auth-enc +``` + +### Create OrdsSrvs Resource + +1. Obtain the Service Name from the OraOperator + + ```bash + SERVICE_NAME=$(kubectl get -n ordsnamespace adb adb-oraoper -o=jsonpath='{.spec.details.dbName}'_TP) + ``` + +1. Create a manifest for ORDS. + + As an ADB already maintains ORDS and APEX, `autoUpgradeORDS` and `autoUpgradeAPEX` will be ignored if set. A new DB User for ORDS will be created to avoid conflict with the pre-provisioned one. This user will be + named, `ORDS_PUBLIC_USER_OPER` if `db.username` is either not specified or set to `ORDS_PUBLIC_USER`. 
+ + ```yaml + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs + metadata: + name: ords-adb-oraoper + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: adb-oraoper + db.connectionType: tns + db.tnsAliasName: $SERVICE_NAME + tnsAdminSecret: + secretName: adb-oraoper-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER_OPER + db.secret: + secretName: adb-oraoper-db-auth-enc + db.adminUser: ADMIN + db.adminUser.secret: + secretName: adb-oraoper-db-auth-enc + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the ordssrvs resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-adb-oraoper -n ordsnamespace -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. + + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-adb-oraoper -n ordsnamespace 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords/adb-oraoper` + +## Conclusion + +This example has a single database pool, named `adb-oraoper`. It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required. This occurs due to the database being detected as an ADB. +* Automatically install/update APEX on startup, if required: This occurs due to the database being detected as an ADB. 
+* The ADB `ADMIN` user will be used to connect the ADB to install APEX/ORDS +* Use a TNS connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` + The `tnsAdminSecret` Secret `adb-oraoper-tns-admin` was created by the OraOperator +* The `passwordKey` has been specified for both `db.secret` and `db.adminUser.secret` as `adb-oraoper-password` to match the OraOperator specification. +* The ADB `ADMIN` user will be used to connect the ADB to install APEX/ORDS \ No newline at end of file diff --git a/docs/ordsservices/examples/existing_db.md b/docs/ordsservices/examples/existing_db.md new file mode 100644 index 00000000..6d4791ae --- /dev/null +++ b/docs/ordsservices/examples/existing_db.md @@ -0,0 +1,112 @@ +# Example: Pre-existing Database + +This example walks through configuring the ORDS Controller to use either a database deployed within Kubernetes, or an existing database external to your cluster. + +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) + +### Database Access + +This example assumes you have a running, accessible Oracle Database. + +```bash +export CONN_STRING=:/ +``` + +### Create encrypted secrets + +```bash +DB_PWD= + +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace + +echo "${DB_PWD}" > db-auth +openssl rsautl -encrypt -pubin -inkey public.pem -in db-auth |base64 > e_db-auth-enc +kubectl create secret generic db-auth-enc --from-file=password=e_db-auth-enc -n ordsnamespace + +rm db-auth e_db-auth-enc + +``` + +### Create ordssrvs Resource + +1. Create a manifest for ORDS. + + This example assumes APEX is already installed in the database. 
+ + The following additional keys are specified for the pool: + * `autoUpgradeORDS` - Boolean; when true the ORDS will be installed/upgraded in the database + * `db.adminUser` - User with privileges to install, upgrade or uninstall ORDS in the database (SYS). + * `db.adminUser.secret` - Secret containing the password for `db.adminUser` (created in the first step) + * `db.username` will be used as the ORDS schema in the database during the install/upgrade process (ORDS_PUBLIC_USER). + + ```bash + echo " + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs + metadata: + name: ords-db + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: default + autoUpgradeORDS: true + restEnabledSql.active: true + plsql.gateway.mode: direct + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//${CONN_STRING} + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: db-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: db-auth-enc + " > ords-db.yaml + + kubectl apply -f ords-db.yaml + ``` + + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the restdataservices resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-sidb -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. 
+ + You can watch the APEX/ORDS Installation progress by running: + + ```bash + POD_NAME=$(kubectl get pod -l "app.kubernetes.io/instance=ords-sidb" -o custom-columns=NAME:.metadata.name -n ordsnamespace --no-headers) + + kubectl logs ${POD_NAME} -c ords-sidb-init -n ordsnamespace -f + ``` + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-db -n ordsnamespace 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords` + + +## Conclusion + +This example has a single database pool, named `default`. It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Use a basic connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` +* The `passwordKey` has been ommitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) diff --git a/docs/ordsservices/examples/mongo_api.md b/docs/ordsservices/examples/mongo_api.md new file mode 100644 index 00000000..f0fd0cf5 --- /dev/null +++ b/docs/ordsservices/examples/mongo_api.md @@ -0,0 +1,158 @@ +# Example: Oracle API for MongoDB Support + +This example walks through using the **ORDSSRVS Controller** with a Containerised Oracle Database to enable MongoDB API Support. + +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) + +### Database Access + +This example assumes you have a running, accessible Oracle Database. For demonstration purposes, +the [Containerised Single Instance Database using the OraOperator](sidb_container.md) will be used. + +### Rest Enable a Schema + +In the database, create an ORDS-enabled user. As this example uses the [Containerised Single Instance Database using the OraOperator](sidb_container.md), the following was performed: + + +1. 
Connect to the database: + + ```bash + DB_PWD=$(kubectl get secrets sidb-db-auth --template='{{.data.password | base64decode}}') + POD_NAME=$(kubectl get pod -l "app=oraoper-sidb" -o custom-columns=NAME:.metadata.name --no-headers) + kubectl exec -it ${POD_NAME} -- sqlplus SYSTEM/${DB_PWD}@FREEPDB1 + ``` + +1. Create the User: + ```sql + create user MONGO identified by "My_Password1!"; + grant soda_app, create session, create table, create view, create sequence, create procedure, create job, + unlimited tablespace to MONGO; + -- Connect as new user + conn MONGO/My_Password1!@FREEPDB1; + exec ords.enable_schema; + ``` + +### Create encrypted secrets + +```bash + +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace + +echo "${DB_PWD}" > sidb-db-auth-enc +openssl rsautl -encrypt -pubin -inkey public.pem -in sidb-db-auth-enc |base64 > e_sidb-db-auth-enc +kubectl create secret generic sidb-db-auth-enc --from-file=password=e_sidb-db-auth-enc -n ordsnamespace +rm sidb-db-auth-enc e_sidb-db-auth-enc +``` + +### Create ordssrvs Resource + +1. Retrieve the Connection String from the containerised SIDB. + + ```bash + CONN_STRING=$(kubectl get singleinstancedatabase oraoper-sidb \ + -o jsonpath='{.status.pdbConnectString}') + + echo $CONN_STRING + ``` + +1. Create a manifest for ORDS. + + As the DB in the Free image does not contain ORDS (or APEX), the following additional keys are specified for the pool: + * `autoUpgradeORDS` - Boolean; when true the ORDS will be installed/upgraded in the database + * `db.adminUser` - User with privileges to install, upgrade or uninstall ORDS in the database (SYS). 
+ * `db.adminUser.secret` - Secret containing the password for `db.adminUser` (created in the first step) + + The `db.username` will be used as the ORDS schema in the database during the install/upgrade process (ORDS_PUBLIC_USER). + + ```bash + echo " + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs + metadata: + name: ords-sidb + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + mongo.enabled: true + poolSettings: + - poolName: default + autoUpgradeORDS: true + restEnabledSql.active: true + plsql.gateway.mode: direct + jdbc.MaxConnectionReuseCount: 5000 + jdbc.MaxConnectionReuseTime: 900 + jdbc.SecondsToTrustIdleConnection: 1 + jdbc.InitialLimit: 100 + jdbc.MaxLimit: 100 + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//${CONN_STRING} + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: sidb-db-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: sidb-db-auth-enc" | kubectl apply -f - + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the restdataservices resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-sidb -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. + + You can watch the APEX/ORDS Installation progress by running: + + ```bash + POD_NAME=$(kubectl get pod -l "app.kubernetes.io/instance=ords-sidb" -o custom-columns=NAME:.metadata.name -n ordsnamespace --no-headers) + + kubectl logs ${POD_NAME} -c ords-sidb-init -n ordsnamespace -f + ``` + +### Test + +1. 
Open a port-forward to the MongoAPI service, for example: + ```bash + kubectl port-forward service/ords-sidb 27017:27017 -n ordsnamespace + ``` + +1. Connect to ORDS using the MongoDB shell: + ```bash + mongosh --tlsAllowInvalidCertificates 'mongodb://MONGO:My_Password1!@localhost:27017/MONGO?authMechanism=PLAIN&authSource=$external&tls=true&retryWrites=false&loadBalanced=true' + ``` + +1. Insert some data: + ```txt + db.createCollection('emp'); + db.emp.insertOne({"name":"Blake","job": "Intern","salary":30000}); + db.emp.insertOne({"name":"Miller","job": "Programmer","salary": 70000}); + db.emp.find({"name":"Miller"}); + ``` + +## Conclusion + +This example has a single database pool, named `default`. It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Use a basic connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` +* The `passwordKey` has been ommitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) +* The MongoAPI service has been enabled: `mongo.enabled: true` +* The MongoAPI service will default to port: `27017` as the property: `mongo.port` has been left undefined +* A number of JDBC parameters were set at the pool level for achieving high performance: + * `jdbc.MaxConnectionReuseCount: 5000` + * `jdbc.MaxConnectionReuseTime: 900` + * `jdbc.SecondsToTrustIdleConnection: 1` + * `jdbc.InitialLimit: 100` + * `jdbc.MaxLimit: 100` diff --git a/docs/ordsservices/examples/multi_pool.md b/docs/ordsservices/examples/multi_pool.md new file mode 100644 index 00000000..ffb537bf --- /dev/null +++ b/docs/ordsservices/examples/multi_pool.md @@ -0,0 +1,200 @@ +# Example: Multipool, Multidatabase using a TNS Names file + +This example walks through using the **ORDSSRVS Operator** with multiple databases using a TNS Names file. 
+Keep in mind that all pools are running in the same Pod, therefore, changing the configuration of one pool will require +a recycle of all pools. + +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) + + +### TNS_ADMIN Secret + +Create a Secret with the contents of the TNS_ADMIN directory. This can be a single `tnsnames.ora` file or additional files such as `sqlnet.ora` or `ldap.ora`. +The example shows using a `$TNS_ADMIN` enviroment variable which points to a directory with valid TNS_ADMIN files. + +To create a secret with all files in the TNS_ADMIN directory: +```bash +kubectl create secret generic multi-tns-admin \ + --from-file=$TNS_ADMIN +``` + +To create a secret with just the tnsnames.ora file: +```bash +kubectl create secret generic multi-tns-admin \ + --from-file=$TNS_ADMIN/tnsnames.ora +``` + +In this example, 4 PDBs will be connected to and the example `tnsnames.ora` file contents are as below: +```text +PDB1=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.1)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB1))) + +PDB2=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.2)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB2))) + +PDB3=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.3)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB3))) + +PDB4=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.4)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB4))) +``` + +### PRIVATE KEY SECRET + +Secrets are encrypted using openssl rsa algorithm. Create public and private key. +Use private key to create a secret. 
+ +```bash +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace +``` + +### ORDS_PUBLIC_USER Secret + +Create a Secret for each of the databases `ORDS_PUBLIC_USER` user. +If multiple databases use the same password, the same secret can be re-used. + +The following secret will be used for PDB1: + +```bash +echo "THIS_IS_A_PASSWORD" > ordspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_ordspwdfile +kubectl create secret generic pdb1-ords-auth-enc --from-file=password=e_ordspwdfile -n ordsnamespace +rm ordspwdfile e_ordspwdfile +``` + +The following secret will be used for PDB2: + +```bash +echo "THIS_IS_A_PASSWORD" > ordspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_ordspwdfile +kubectl create secret generic pdb2-ords-auth-enc --from-file=password=e_ordspwdfile -n ordsnamespace +rm ordspwdfile e_ordspwdfile +``` + +The following secret will be used for PDB3 and PDB4: + +```bash +echo "THIS_IS_A_PASSWORD" > ordspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_ordspwdfile +kubectl create secret generic multi-ords-auth-enc --from-file=password=e_ordspwdfile -n ordsnamespace +rm ordspwdfile e_ordspwdfile +``` + +### Privileged Secret (*Optional) + +If taking advantage of the [AutoUpgrade](../autoupgrade.md) functionality, create a secret for a user with the privileges to modify the ORDS and/or APEX schemas. + +In this example, only PDB1 will be set for [AutoUpgrade](../autoupgrade.md), the other PDBs already have APEX and ORDS installed. 
+ +```bash +echo "THIS_IS_A_PASSWORD" > syspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in syspwdfile |base64 > e_syspwdfile +kubectl create secret generic pdb1-priv-auth-enc --from-file=password=e_syspwdfile -n ordsnamespace +rm syspwdfile e_syspwdfile +``` + +### Create OrdsSrvs Resource + +1. Create a manifest for ORDS, ords-multi-pool.yaml: + + ```yaml + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs + metadata: + name: ords-multi-pool + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: pdb1 + autoUpgradeORDS: true + autoUpgradeAPEX: true + db.connectionType: tns + db.tnsAliasName: PDB1 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: pdb1-ords-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: pdb1-priv-auth-enc + - poolName: pdb2 + db.connectionType: tns + db.tnsAliasName: PDB2 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: pdb2-ords-auth-enc + - poolName: pdb3 + db.connectionType: tns + db.tnsAliasName: PDB3 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: multi-ords-auth-enc + - poolName: pdb4 + db.connectionType: tns + db.tnsAliasName: PDB4 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: multi-ords-auth-enc + ``` + latest container-registry.oracle.com/database/ords version, 
**24.1.1**, valid as of **30-May-2024** + +1. Apply the yaml file: + ```bash + kubectl apply -f ords-multi-pool.yaml + ``` + +1. Watch the ordssrvs resource until the status is **Healthy**: + ```bash + kubectl get OrdsSrvs ords-multi-pool -n ordsnamespace -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. As APEX + is being installed for the first time by the Operator into PDB1, it will remain in the **Preparing** + status for an additional 5-10 minutes. + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-multi-pool -n ordsnamespace 8443:8443 +``` + +1. For PDB1, direct your browser to: `https://localhost:8443/ords/pdb1` +1. For PDB2, direct your browser to: `https://localhost:8443/ords/pdb2` +1. For PDB3, direct your browser to: `https://localhost:8443/ords/pdb3` +1. For PDB4, direct your browser to: `https://localhost:8443/ords/pdb4` + +## Conclusion + +This example has multiple pools, named `pdb1`, `pdb2`, `pdb3`, and `pdb4`. 
+ +* They all share the same `tnsAdminSecret` to connect using their individual `db.tnsAliasName` +* They will all automatically restart when the configuration changes: `forceRestart: true` +* Only the `pdb1` pool will automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Only the `pdb1` pool will automatically install/update APEX on startup, if required: `autoUpgradeAPEX: true` +* The `passwordKey` has been omitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) diff --git a/docs/ordsservices/examples/ordsnamespace-role-binding.yaml b/docs/ordsservices/examples/ordsnamespace-role-binding.yaml new file mode 100644 index 00000000..018d8934 --- /dev/null +++ b/docs/ordsservices/examples/ordsnamespace-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ordsnamespace-oracle-database-operator-manager-rolebinding + namespace: ordsnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/ordsservices/examples/sidb_container.md b/docs/ordsservices/examples/sidb_container.md new file mode 100644 index 00000000..3cda09ea --- /dev/null +++ b/docs/ordsservices/examples/sidb_container.md @@ -0,0 +1,152 @@ +# Example: Containerised Single Instance Database using the OraOperator + +This example walks through using the **ORDSSRVS Controller** with a Containerised Oracle Database created by the **SIDB Controller** in the same Kubernetes Cluster. + +Before testing this example, please verify the prerequisites: [ORDSSRVS prerequisites](../README.md#prerequisites) + +### Deploy a Containerised Oracle Database + +Refer to Single Instance Database (SIDB) [README](https://github.com/oracle/oracle-database-operator/blob/main/docs/sidb/README.md) for details. 
+ +1. Create a Secret for the Database password: + + ```bash + DB_PWD= + kubectl create secret generic sidb-db-auth --from-literal=password=${DB_PWD} --namespace ordsnamespace + ``` +1. Create a manifest for the containerised Oracle Database. + + The POC uses an Oracle Free Image, but other versions may be substituted; review the OraOperator Documentation for details on the manifests. + + ```yaml + apiVersion: database.oracle.com/v4 + kind: SingleInstanceDatabase + metadata: + name: oraoper-sidb + namespace: ordsnamespace + spec: + edition: free + adminPassword: + secretName: sidb-db-auth + image: + pullFrom: container-registry.oracle.com/database/free:23.7.0.0 + prebuiltDB: true + replicas: 1 + ``` + latest container-registry.oracle.com/database/free version, **23.7.0.0-lite**, valid as of **2-May-2025** + + +1. Watch the `singleinstancedatabases` resource until the database status is **Healthy**: + + ```bash + kubectl get singleinstancedatabases/oraoper-sidb -w -n ordsnamespace + ``` + **NOTE**: If this is the first time pulling the free database image, it may take up to 15 minutes for the database to become available. + +### Create encrypted secret + +```bash +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace + +echo "${DB_PWD}" > sidb-db-auth +openssl rsautl -encrypt -pubin -inkey public.pem -in sidb-db-auth |base64 > e_sidb-db-auth +kubectl create secret generic sidb-db-auth-enc --from-file=password=e_sidb-db-auth -n ordsnamespace +rm sidb-db-auth e_sidb-db-auth +``` + + +### Create RestDataServices Resource + +1. Retrieve the Connection String from the containerised SIDB. + + ```bash + CONN_STRING=$(kubectl get singleinstancedatabase oraoper-sidb \ + -n ordsnamespace \ + -o jsonpath='{.status.pdbConnectString}') + + echo $CONN_STRING + ``` + +1. 
Create a manifest for ORDS. + + As the DB in the Free image does not contain ORDS (or APEX), the following additional keys are specified for the pool: + * `autoUpgradeORDS` - Boolean; when true the ORDS will be installed/upgraded in the database + * `autoUpgradeAPEX` - Boolean; when true the APEX will be installed/upgraded in the database + * `db.adminUser` - User with privileges to install, upgrade or uninstall ORDS in the database (SYS). + * `db.adminUser.secret` - Secret containing the password for `db.adminUser` (created in the first step) + + The `db.username` will be used as the ORDS schema in the database during the install/upgrade process (ORDS_PUBLIC_USER). + + ```bash + echo " + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs + metadata: + name: ords-sidb + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: default + autoUpgradeORDS: true + autoUpgradeAPEX: true + restEnabledSql.active: true + plsql.gateway.mode: direct + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//${CONN_STRING} + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: sidb-db-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: sidb-db-auth-enc + " > ords-sidb.yaml + + kubectl apply -f ords-sidb.yaml + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the ordssrvs resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-sidb -n ordsnamespace -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. 
+ + You can watch the APEX/ORDS Installation progress by running: + + ```bash + POD_NAME=$(kubectl get pod -l "app.kubernetes.io/instance=ords-sidb" -n ordsnamespace -o custom-columns=NAME:.metadata.name --no-headers) + + kubectl logs ${POD_NAME} -c ords-sidb-init -n ordsnamespace -f + ``` + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-sidb -n ordsnamespace 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords` + +## Conclusion + +This example has a single database pool, named `default`. It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Automatically install/update APEX on startup, if required: `autoUpgradeAPEX: true` +* Use a basic connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` +* The `passwordKey` has been omitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) diff --git a/docs/ordsservices/usecase01/create_mong_schema.sql b/docs/ordsservices/usecase01/create_mong_schema.sql new file mode 100644 index 00000000..a00ee441 --- /dev/null +++ b/docs/ordsservices/usecase01/create_mong_schema.sql @@ -0,0 +1,9 @@ +drop user MONGO cascade; +set echo on +set head on +create user MONGO identified by "My_Password1!"; +grant soda_app, create session, create table, create view, create sequence, create procedure, create job, +unlimited tablespace to MONGO; +conn MONGO/My_Password1!@158.180.233.248:30001/FREEPDB1 +exec ords.enable_schema; +exit; diff --git a/docs/ordsservices/usecase01/help b/docs/ordsservices/usecase01/help new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/docs/ordsservices/usecase01/help @@ -0,0 +1 @@ + diff --git a/docs/ordsservices/usecase01/makefile b/docs/ordsservices/usecase01/makefile new file mode 100644 index 
00000000..76b47210 --- /dev/null +++ b/docs/ordsservices/usecase01/makefile @@ -0,0 +1,778 @@ +# +# Copyright (c) 2006, 2024, Oracle and/or its affiliates. +# +# +# NAME +# makefile: +# This makefile helps to set up multipool and sidb cases +# edit the following variables with your system information +# and execute make help to list the list of avilable targets +# + +export PDB1=pdb1 +export PDB2=pdb2 +export TNS1=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=$(PDB1)))) +export TNS2=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=$(PDB2)))) +export SIDB_PASSWORD=....write password here .... +export PDB1_PWD=.....write password here.... +export PDB2_PWD=.....write password.... +export ORDS_MULTI_POOL_PWD=....write password here.... 
+export USER_CONTAINER_REGISTRY=username@oracle.com <--Your OCR account +export OPRNAMESPACE=oracle-database-operator-system +export ORDSNAMESPACE=ordsnamespace + + +# +# DESCRIPTION +# Main makefile - see target table +# +# | Target | Description | +# +-------------+--------------------------------------------------------+ +# | step0a | create_ordsnamespace.yaml | +# +-------------+--------------------------------------------------------+ +# | step1a | setup certmaneger | +# +-------------+--------------------------------------------------------+ +# | step2a | setup operator oracle-database-operator.yaml | +# +-------------+--------------------------------------------------------+ +# | step3a | default scoped deployment default-ns-role-binding.yaml | +# +-------------+--------------------------------------------------------+ +# | step4a | node - persistent volume - storage class for the db | +# +-------------+--------------------------------------------------------+ +# | step5a | setup secrets | +# +----------------------------------------------------------------------+ +# | step6a | setup secrets for OCR | +# +----------------------------------------------------------------------+ +# | step7a | setup sidb | +# +----------------------------------------------------------------------+ +# | step8a | ⭐Setup REST SERVER ⭐ | +# +-------------+--------------------------------------------------------+ +# +# step[1-7]a are required to start mongodb API rest server +# +# step[9-11] test mongo API +# +-------------+--------------------------------------------------------+ +# | step9 | configure a mongo db user on sidb | +# +-------------+--------------------------------------------------------+ +# | step10 | ⭐Setup REST SERVER FOR MONGO API ⭐ | +# +-------------+--------------------------------------------------------+ +# | step11 | Test Mongo API | +# +-------------+--------------------------------------------------------+ +# +# step[12- ] test multi tns configuration +# 
+-------------+--------------------------------------------------------+ +# | step12 | create tns secret | +# +-------------+--------------------------------------------------------+ +# | step13 | create passwords secret | +# +-------------+--------------------------------------------------------+ +# | step14 | ⭐SetupMulti Rest Server ⭐ | +# +-------------+--------------------------------------------------------+ +# + + + +export WATCHLIST=$(OPRNAMESPACE),$(ORDSNAMESPACE) +export CREATE_SINGLEINSTANCE=create_singleinstance_db.yaml +export CERTMANAGER=https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml +export SIDB_SECRET=sidb-db-auth +export ORDS_SECRET=ords-db-auth +export MULTI_ORDS_AUTH_SECRET=multi-ords-auth-enc +export PDB1_PRIV_AUTH_SECRET=pdb1-priv-auth-enc +export PDB2_PRIV_AUTH_SECRET=pdb2-priv-auth-enc + + +export SIDB_IMAGE=container-registry.oracle.com/database/free:23.4.0.0 +export ORDS_IMAGE=container-registry.oracle.com/database/ords:24.1.0 +export ORDS_IMAGE.1=container-registry.oracle.com/database/ords:24.1.1 +export SECRET_CONTAINER_REGISTRY=oracle-container-registry-secret +export ORACLE_CONTAINER_REGISTRY=container-registry.oracle.com +export REST_SERVER_NAME=ords-sidb +export REST_SERVER_NAME_MONGO=ords-sidb-mongo +export MONGOSH=mongosh-2.3.1-linux-x64 +export KIND=OrdsSrvs + +export TNSNAMES=./tnsnames.ora +export TNSADMIN=`pwd`/tnsadmin +export PRVKEY=ca.key +export PUBKEY=public.pem + +## CMD SECTION## +export KUBECTL=/usr/local/go/bin/kubectl +export DIFF=/usr/bin/diff +export MAKE=/usr/bin/make +export CURL=/usr/bin/curl +export TAR=/usr/bin/tar +export OPENSSL=/usr/bin/openssl + +## YAML AND OTHER FILES ## +export CREATE_ORDSNAMESPACE=create_$(ORDSNAMESPACE).yaml +export DEFAULT_NAMESPACE_SCOPE=default-ns-role-binding.yaml +export RST_NAMESPACE_SCOPE=ords-ns-role-binding.yaml +export ORACLE_OPERATOR_YAML=../../../oracle-database-operator.yaml +export NODE_RBAC=node-rbac.yaml +export 
STORAGE_CLASS_RBAC=storage-class-rbac.yaml +export PERSISTENT_VOLUME_RBAC=persistent-volume-rbac.yaml +export SIDB_CREATION=sidb_create.yaml +export SECRET_CONTAINER_REGISTRY_SCRIPT=create_registry_secret.sh +export REST_SERVER_CREATION=rest_server_creation.yaml +export REST_SERVER_CREATION_MONGO=rest_server_creation_mongo.yaml +export MULTISRV_MANIFEST=create_multisrv.yaml +export MONGOORADBUSER=MONGO + + +MAKEFILE=./makefile +.ONESHELL: + +define manpage +@printf "\n" +@printf "\033[7m%s\033[0m \033[7m%s\033[0m \033[7m%s\033[0m\n" "TARGET " "DESCRIPTION " "YAML FILE " +@printf "%s %s %s\n" "---------" " --------------------------------------------------" "--------------------------------------" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step0/a/d setup new namespace" " " "$(CREATE_ORDSNAMESPACE)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step1/a/d setup certmaneger " " " "$(CERTMANAGER)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step2/a/d setup operator" " " "$(shell basename $(ORACLE_OPERATOR_YAML))" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step3/a/d default scoped deployment" " " "$(DEFAULT_NAMESPACE_SCOPE)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" " ords scoped deployment" " " "$(RST_NAMESPACE_SCOPE)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step4/a/d node rbac" " " "$(NODE_RBAC)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" " storage class " " " "$(STORAGE_CLASS_RBAC)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" " persistent volume " " " "$(PERSISTENT_VOLUME_RBAC)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step5/a/d setup db secret" " " "n/a" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step6/a/d setup registry secret" " " "$(SECRET_CONTAINER_REGISTRY_SCRIPT)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step7/a/d setup sidb " " " "$(SIDB_CREATION)" +@printf "================================================\n" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step8/a/d setup RestServer " " " "$(REST_SERVER_CREATION)" +@printf "%-40s %+20s \033[1m 
%s\033[0m\n" "step9/-/- configure " " " "Mongo ora db user:$(MONGOORADBUSER)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step10/a/d setup RestServer Mongo " " " "$(REST_SERVER_CREATION_MONGO)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step11/-/- test mongodb API " " " "----" +@printf "================================================\n" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step12/a/d create secret for tnsadmin " " " "$(TNSADMIN)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step13/a/d create secrets for adminusers" " " "---" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step14/a/d/e setup Multi Ords services " " " "---" +@printf "================================================\n" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "diagordsinit" "" "🔬dump initpod logs" + + + +@printf "================================================\n" +@printf " a=apply d=delete ⚡e=generate error ⚡\n" +@printf "\n" +endef + + +help:man +man: + $(call manpage) + +define namespace +cat< $(CREATE_ORDSNAMESPACE) +#apiVersion: v1 +#kind: Namespace +#metadata: +# labels: +# control-plane: controller-manager +# name: $(2) +EOF +$(KUBECTL) $(1) -f $(CREATE_ORDSNAMESPACE) +$(KUBECTL) get namespace +endef + +step0: + $(call namespace,$(ACTION),$(ORDSNAMESPACE)) +step0a: + $(MAKE) -f $(MAKEFILE) step0 ACTION=apply +step0d: + $(MAKE) -f $(MAKEFILE) step0 ACTION=delete + +step1: + $(KUBECTL) $(ACTION) -f $(CERTMANAGER) +step1a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step1 + $(KUBECTL) get pod -n cert-manager +step1d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step1 + + +define setwatchnamespace +@echo "Setting watch namespace list: $(WATCHLIST)" +sed 's/value: ""/value: "$(WATCHLIST)"/g' $(ORACLE_OPERATOR_YAML) > `basename $(ORACLE_OPERATOR_YAML)` +$(KUBECTL) $(1) -f `basename $(ORACLE_OPERATOR_YAML)` +$(DIFF) $(ORACLE_OPERATOR_YAML) `basename $(ORACLE_OPERATOR_YAML)` +$(KUBECTL) get pods -n $(OPRNAMESPACE) +endef + +step2: + $(call setwatchnamespace,$(ACTION)) +step2a: + $(MAKE) -f $(MAKEFILE) 
ACTION=apply step2 +step2d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step2 + + +define namespacescpe +cat<$(RST_NAMESPACE_SCOPE) +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: RoleBinding +#metadata: +# name: $(ORDSNAMESPACE)-rolebinding +# namespace: $(ORDSNAMESPACE) +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +EOF + +cat< $(DEFAULT_NAMESPACE_SCOPE) +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: RoleBinding +#metadata: +# name: oracle-database-operator-oracle-database-operator-manager-rolebinding +# namespace: default +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +EOF + +$(KUBECTL) $(1) -f $(RST_NAMESPACE_SCOPE) +$(KUBECTL) $(1) -f $(DEFAULT_NAMESPACE_SCOPE) +$(KUBECTL) get RoleBinding -n $(ORDSNAMESPACE) + +endef + +step3: + $(call namespacescpe,$(ACTION)) + +step3a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step3 + +step3d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step3 + + +export NODE_RBAC=node-rbac.yaml +export STORAGE_CLASS_RBAC=storage-class-rbac.yaml +export PERSISTENT_VOLUME_RBAC=persistent-volume-rbac.yaml + + +define persistenvolume + +cat<$(NODE_RBAC) +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRole +#metadata: +# name: oracle-database-operator-manager-role-node +#rules: +#- apiGroups: +# - "" +# resources: +# - nodes +# verbs: +# - list +# - watch +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRoleBinding +#metadata: +# name: oracle-database-operator-manager-role-node-cluster-role-binding +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role-node +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: 
oracle-database-operator-system +EOF + +cat<$(STORAGE_CLASS_RBAC) +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRole +#metadata: +# name: oracle-database-operator-manager-role-storage-class +#rules: +#- apiGroups: +# - storage.k8s.io +# resources: +# - storageclasses +# verbs: +# - get +# - list +# - watch +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRoleBinding +#metadata: +# name: oracle-database-operator-manager-role-storage-class-cluster-role-binding +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role-storage-class +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +#--- +EOF + +cat<$(PERSISTENT_VOLUME_RBAC) +# +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRole +#metadata: +# name: oracle-database-operator-manager-role-persistent-volume +#rules: +#- apiGroups: +# - "" +# resources: +# - persistentvolumes +# verbs: +# - get +# - list +# - watch +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRoleBinding +#metadata: +# name: oracle-database-operator-manager-role-persistent-volume-cluster-role-binding +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role-persistent-volume +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +#--- +# +EOF + +$(KUBECTL) $(1) -f $(NODE_RBAC) +$(KUBECTL) $(1) -f $(STORAGE_CLASS_RBAC) +$(KUBECTL) $(1) -f $(PERSISTENT_VOLUME_RBAC) + +endef + +step4: + $(call persistenvolume,$(ACTION)) +step4a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step4 +step4d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step4 + + +export SYSPWDFILE1=syspwdfile +export ORDPWDFILE=ordspwdfile +export SIDB_PASSWORD_FILE=sidbpasswordfile + +export PRVKEY=ca.key +export PUBKEY=public.pem +export OPENSSL=/usr/bin/openssl + +step5a: + echo $(SIDB_PASSWORD) > $(SIDB_PASSWORD_FILE) + - $(KUBECTL) 
delete secret pubkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret prvkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(SIDB_SECRET) -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(ORDS_SECRET) -n ${ORDSNAMESPACE} + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ${PRVKEY} + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(ORDSNAMESPACE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SIDB_PASSWORD_FILE) |base64 > e_$(SIDB_PASSWORD_FILE) + $(KUBECTL) create secret generic $(SIDB_SECRET) --from-literal=password=$(SIDB_PASSWORD) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic $(ORDS_SECRET) --from-file=password=e_$(SIDB_PASSWORD_FILE) -n $(ORDSNAMESPACE) + $(RM) e_$(SIDB_PASSWORD_FILE) $(SIDB_PASSWORD_FILE) + +step5d: + - $(KUBECTL) delete secret pubkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret prvkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(SIDB_SECRET) -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(ORDS_SECRET) -n ${ORDSNAMESPACE} + + +define registry_secret +printf "#!/bin/bash \n" >$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "echo enter password for $(USER_CONTAINER_REGISTRY)@$(ORACLE_CONTAINER_REGISTRY) \n" >$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "read -s scpwd \n" >>$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "$(KUBECTL) create secret docker-registry $(SECRET_CONTAINER_REGISTRY) --docker-server=$(ORACLE_CONTAINER_REGISTRY) --docker-username=$(USER_CONTAINER_REGISTRY) --docker-password=\u0024scpwd --docker-email=$(USER_CONTAINER_REGISTRY) -n $(OPRNAMESPACE) \n" >>$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "$(KUBECTL) create secret docker-registry $(SECRET_CONTAINER_REGISTRY) --docker-server=$(ORACLE_CONTAINER_REGISTRY) --docker-username=$(USER_CONTAINER_REGISTRY) 
--docker-password=\u0024scpwd --docker-email=$(USER_CONTAINER_REGISTRY) -n $(ORDSNAMESPACE) \n" >>$(SECRET_CONTAINER_REGISTRY_SCRIPT) + +bash $(SECRET_CONTAINER_REGISTRY_SCRIPT) +endef + +step6a: + $(call registry_secret) + +step6d: + $(KUBECTL) delete secret $(SECRET_CONTAINER_REGISTRY) -n $(OPRNAMESPACE) + + +define sidb + +cat<$(SIDB_CREATION) +#apiVersion: database.oracle.com/v4 +#kind: SingleInstanceDatabase +#metadata: +# name: oraoper-sidb +# namespace: $(OPRNAMESPACE) +#spec: +# replicas: 1 +# image: +# pullFrom: $(SIDB_IMAGE) +# pullSecrets: $(SECRET_CONTAINER_REGISTRY) +# prebuiltDB: true +# sid: FREE +# listenerPort: 30001 +# edition: free +# adminPassword: +# secretName: $(SIDB_SECRET) +# secretKey: password +# pdbName: FREEPDB1 +EOF + +$(KUBECTL) $(1) -f $(SIDB_CREATION) +endef + +step7: + $(call sidb,$(ACTION)) +step7a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step7 +step7d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step7 + + +define restservice +cat<$(REST_SERVER_CREATION) +#apiVersion: database.oracle.com/v4 +#kind: $(KIND) +#metadata: +# name: $(REST_SERVER_NAME) +# namespace: $(ORDSNAMESPACE) +#spec: +# image: $(ORDS_IMAGE) +# forceRestart: true +# encPrivKey: +# secretName: prvkey +# passwordKey: privateKey +# globalSettings: +# database.api.enabled: true +# poolSettings: +# - poolName: default +# autoUpgradeORDS: true +# autoUpgradeAPEX: true +# restEnabledSql.active: true +# plsql.gateway.mode: direct +# db.connectionType: customurl +# db.customURL: jdbc:oracle:thin:@//$(2) +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: $(ORDS_SECRET) +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: $(ORDS_SECRET) +# +EOF + +[ $(3) -eq 1 ] && { +sed -i 's/SYS/SYT/g' $(REST_SERVER_CREATION) +echo -e "TYPO" +} + +$(KUBECTL) $(1) -f $(REST_SERVER_CREATION) +endef + +step8: + $(eval TNS_ALIAS_CDB := $(shell $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.clusterConnectString}}{{"\n"}}{{end}}')) + 
$(eval TNS_ALIAS_PDB := $(shell $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}')) + echo $(TNS_ALIAS) + $(call restservice,$(ACTION),$(TNS_ALIAS_PDB),$(ERR)) +step8a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step8 ERR=0 +step8d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step8 ERR=0 +step8e: + $(MAKE) -f $(MAKEFILE) ACTION=apply step8 ERR=1 + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + +loginords: + @$(eval RESTPOD := $(shell $(KUBECTL) get pods --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' -n $(ORDSNAMESPACE))) + $(KUBECTL) logs $(RESTPOD) -n $(ORDSNAMESPACE) + $(KUBECTL) exec $(RESTPOD) -n $(ORDSNAMESPACE) -it -- /bin/bash + +logindb: + $(eval PODPDB := $(shell $(KUBECTL) get pods --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' -n $(OPRNAMESPACE)|grep -v oracle-database-operator)) + echo $(PODPDB) + $(KUBECTL) exec $(PODPDB) -n $(OPRNAMESPACE) -it -- bash + + +report: + $(KUBECTL) get pods -n $(OPRNAMESPACE) + $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) + $(KUBECTL) get pods -n $(ORDSNAMESPACE) + $(KUBECTL) get $(KIND) -n $(ORDSNAMESPACE) + + +someattributes: + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.connectString}}{{"\n"}}{{end}}' + 
kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.tcpsConnectString}}{{"\n"}}{{end}}' + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.clusterConnectString}}{{"\n"}}{{end}}' + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.tcpsPdbConnectString}}{{"\n"}}{{end}}' + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}' + + + + + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + + + +step9: sql +define dbenv +$(1): DB_PWD=`$(KUBECTL) get secrets sidb-db-auth -n $(OPRNAMESPACE) --template='{{.data.password | base64decode}}'` +$(1): POD_NAME=`$(KUBECTL) get pod -l "app=oraoper-sidb" -o custom-columns=NAME:.metadata.name -n $(OPRNAMESPACE) --no-headers` +$(1): TNSSTR=`$(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}'` +endef + +$(eval $(call dbenv,sqlplus sql)) +#$(eval $(call dbenv,sqlplus)) + +define copyfile +cat <create_mong_schema.sql +drop user MONGO cascade; +set echo on +set head on +create user MONGO identified by "My_Password1!"; +grant soda_app, create session, create table, create view, create 
sequence, create procedure, create job, +unlimited tablespace to MONGO; +conn MONGO/My_Password1!@${TNSSTR} +exec ords.enable_schema; +exit; +EOF +$(KUBECTL) cp ./create_mong_schema.sql $(POD_NAME):/home/oracle -n $(OPRNAMESPACE) +endef + +sql: + echo $(TNSSTR) + $(call copyfile) + @$(KUBECTL) exec -it $(POD_NAME) -n $(OPRNAMESPACE) -- sqlplus SYSTEM/$(DB_PWD)@$(TNSSTR) @/home/oracle/create_mong_schema.sql + +sqlplus: + @$(KUBECTL) exec -it $(POD_NAME) -n $(OPRNAMESPACE) -- sqlplus SYSTEM/$(DB_PWD)@$(TNSSTR) + + +define restservicemongo +cat <$(REST_SERVER_CREATION_MONGO) +#apiVersion: database.oracle.com/v4 +#kind: $(KIND) +#metadata: +# name: $(REST_SERVER_NAME_MONGO) +# namespace: $(ORDSNAMESPACE) +#spec: +# image: $(ORDS_IMAGE.1) +# forceRestart: true +# globalSettings: +# database.api.enabled: true +# mongo.enabled: true +# poolSettings: +# - poolName: default +# autoUpgradeORDS: true +# restEnabledSql.active: true +# plsql.gateway.mode: direct +# jdbc.MaxConnectionReuseCount: 5000 +# jdbc.MaxConnectionReuseTime: 900 +# jdbc.SecondsToTrustIdleConnection: 1 +# jdbc.InitialLimit: 100 +# jdbc.MaxLimit: 100 +# db.connectionType: customurl +# db.customURL: jdbc:oracle:thin:@//${2} +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: ords-db-auth +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: ords-db-auth +EOF +$(KUBECTL) $(1) -f $(REST_SERVER_CREATION_MONGO) +endef + + + +step10: + $(eval TNS_ALIAS_PDB := $(shell $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}')) + echo $(TNS_ALIAS_PDB) + $(call restservicemongo,$(ACTION),$(TNS_ALIAS_PDB)) +step10a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step10 +step10d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step10 + + +step11: + echo "Open a port-forward to the MongoAPI service" + @nohup $(KUBECTL) port-forward service/$(REST_SERVER_NAME_MONGO) 27017:27017 -n $(ORDSNAMESPACE) 1>portfwd.log 2>&1 & + @echo "DOWNLOADING MONGOSH" + 
@$(CURL) https://downloads.mongodb.com/compass/$(MONGOSH).tgz --output $(MONGOSH).tgz + @echo "UNTAR FILE" + @$(TAR) -zxvf $(MONGOSH).tgz + ./$(MONGOSH)/bin/mongosh --tlsAllowInvalidCertificates 'mongodb://MONGO:My_Password1!@localhost:27017/MONGO?authMechanism=PLAIN&authSource=$external&tls=true&retryWrites=false&loadBalanced=true' + @echo "STOP PORT FRWD" + @kill `ps -ef | grep kubectl | grep 27017 | grep -v grep | awk '{printf $$2}'` + $(RM) $(MONGOSH).tgz + $(RM) -rf ./$(MONGOSH) + + +define buildtns +echo "Building tnsnames.ora" +cat <$(TNSADMIN)/$(TNSNAMES) +$(PDB1)=$(TNS1) + +$(PDB2)=$(TNS2) +EOF +$(KUBECTL) create secret generic multi-tns-admin -n $(ORDSNAMESPACE) --from-file=$(TNSADMIN)/ +endef + +step12a: + $(call buildtns) + +step12d: + $(KUBECTL) delete secret multi-tns-admin -n $(ORDSNAMESPACE) + +export SYSPWDFILE1=syspwdfile1 +export SYSPWDFILE2=syspwdfile2 +export ORDPWDFILE=ordspwdfile + + +step13a: + echo $(PDB1_PWD) > $(SYSPWDFILE1) + echo $(PDB2_PWD) > $(SYSPWDFILE2) + echo $(ORDS_MULTI_POOL_PWD) > $(ORDPWDFILE) + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ${PRVKEY} + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + #$(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(ORDSNAMESPACE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE1) |base64 > e_$(SYSPWDFILE1) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE2) |base64 > e_$(SYSPWDFILE2) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(ORDPWDFILE) |base64 > e_$(ORDPWDFILE) + $(KUBECTL) create secret generic $(PDB1_PRIV_AUTH_SECRET) --from-file=password=e_$(SYSPWDFILE1) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic $(PDB2_PRIV_AUTH_SECRET) --from-file=password=e_$(SYSPWDFILE2) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic
$(MULTI_ORDS_AUTH_SECRET) --from-file=password=e_$(ORDPWDFILE) -n $(ORDSNAMESPACE) + $(RM) $(SYSPWDFILE1) $(SYSPWDFILE2) $(ORDPWDFILE) e_$(SYSPWDFILE1) e_$(SYSPWDFILE2) e_$(ORDPWDFILE) + +step13d: + - $(KUBECTL) delete secret pubkey -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret prvkey -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret $(PDB1_PRIV_AUTH_SECRET) -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret $(PDB2_PRIV_AUTH_SECRET) -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret $(MULTI_ORDS_AUTH_SECRET) -n $(ORDSNAMESPACE) + +define multisrv +cat <$(MULTISRV_MANIFEST) +#apiVersion: database.oracle.com/v4 +#kind: $(KIND) +#metadata: +# name: ords-multi-pool +# namespace: $(ORDSNAMESPACE) +#spec: +# image: container-registry.oracle.com/database/ords:24.1.1 +# forceRestart: true +# encPrivKey: +# secretName: prvkey +# passwordKey: privateKey +# globalSettings: +# database.api.enabled: true +# poolSettings: +# - poolName: pdb1 +# autoUpgradeAPEX: false +# autoUpgradeORDS: false +# db.connectionType: tns +# db.tnsAliasName: pdb1 +# tnsAdminSecret: +# secretName: multi-tns-admin +# restEnabledSql.active: true +# feature.sdw: true +# plsql.gateway.mode: proxied +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: $(MULTI_ORDS_AUTH_SECRET) +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: $(PDB1_PRIV_AUTH_SECRET) +# - poolName: pdb2 +# autoUpgradeAPEX: false +# autoUpgradeORDS: false +# db.connectionType: tns +# db.tnsAliasName: PDB2 +# tnsAdminSecret: +# secretName: multi-tns-admin +# restEnabledSql.active: true +# feature.sdw: true +# plsql.gateway.mode: proxied +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: $(MULTI_ORDS_AUTH_SECRET) +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: $(PDB1_PRIV_AUTH_SECRET) + +# +EOF +[ $(2) -eq 1 ] && { +sed -i 's/SYS/SYT/g' $(MULTISRV_MANIFEST) +echo -e "TYPO" +} + +$(KUBECTL) $(1) -f $(MULTISRV_MANIFEST) +endef + +step14: + $(call multisrv,$(ACTION),$(ERR)) +step14a: + $(MAKE) -f 
$(MAKEFILE) ACTION=apply ERR=0 step14 +step14d: + $(MAKE) -f $(MAKEFILE) ACTION=delete ERR=0 step14 +step14e: + $(MAKE) -f $(MAKEFILE) ACTION=apply ERR=1 step14 + + +define dumpinit +#!/bin/bash +NAMESPACE=${1} +KUBECTL=/usr/bin/kubectl +for _pod in `${KUBECTL} get pods --no-headers -o custom-columns=":metadata.name" --no-headers -n $${NAMESPACE}` +do + for _podinit in `${KUBECTL} get pod $${_pod} -n $${NAMESPACE} -o="custom-columns=INIT-CONTAINERS:.spec.initContainers[*].name" --no-headers` + do + echo "DUMPINIT $${_pod}:$${_podinit}" + ${KUBECTL} logs -f --since=0 $${_pod} -n $${NAMESPACE} -c $${_podinit} + done +done +endef + +diagordsinit: + $(call dumpinit ,$(ORDSNAMESPACE)) + diff --git a/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora new file mode 100644 index 00000000..1b1b8943 --- /dev/null +++ b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora @@ -0,0 +1,3 @@ +pdb1=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdb1))) + +pdb2=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdb2))) diff --git a/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora.offline b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora.offline new file mode 100644 index 00000000..b58a8a66 --- /dev/null +++ b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora.offline @@ -0,0 +1 @@
+pdb1=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) diff --git a/docs/sharding/README.md b/docs/sharding/README.md new file mode 100644 index 00000000..487d9ec3 --- /dev/null +++ b/docs/sharding/README.md @@ -0,0 +1,201 @@ +# Using Oracle Globally Distributed Database with Oracle Database Operator for Kubernetes + +Oracle Globally Distributed Database distributes segments of a data set across many databases (shards) on different computers, either on-premises or in cloud. This feature enables globally distributed, linearly scalable, multimodel databases. It requires no specialized hardware or software. Oracle Globally Distributed Database does all this while rendering the strong consistency, full power of SQL, support for structured and unstructured data, and the Oracle Database ecosystem. It meets data sovereignty requirements, and supports applications that require low latency and high availability. + +All of the shards together make up a single logical database, which is referred to as an Oracle Globally Distributed Database (GDD). + +Kubernetes provides infrastructure building blocks, such as compute, storage, and networks. Kubernetes makes the infrastructure available as code. It enables rapid provisioning of multi-node topologies. Additionally, Kubernetes also provides statefulsets, which are the workload API objects that are used to manage stateful applications. This provides us lifecycle management elasticity for databases as a stateful application for various database topologies, such as Oracle Globally Distributed Database, Oracle Real Application Clusters (Oracle RAC), single instance Oracle Database, and other Oracle features and configurations.
+ +The Sharding Database controller in Oracle Database Operator deploys Oracle Globally Distributed Database Topology as a statefulset in the Kubernetes clusters, using Oracle Database and Global Data Services Docker images. The Oracle Sharding database controller manages the typical lifecycle of Oracle Globally Distributed Database topology in the Kubernetes cluster, as shown below: + +* Create primary statefulsets shards +* Create master and standby Global Data Services statefulsets +* Create persistent storage, along with statefulset +* Create services +* Create load balancer service +* Provision Oracle Globally Distributed Database topology by creating and configuring the following: + * Catalog database + * Shard Databases + * GSMs + * Shard scale up and scale down +* Shard topology cleanup + +The Oracle Sharding database controller provides end-to-end automation of Oracle Globally Distributed Database topology deployment in Kubernetes clusters. + +## Using Oracle Database Operator Sharding Controller + +Following sections provide the details for deploying Oracle Globally Distributed Database (Oracle Sharded Database) using Oracle Database Operator Sharding Controller with different use cases: + +* [Prerequisites for running Oracle Sharding Database Controller](#prerequisites-for-running-oracle-sharding-database-controller) +* [Oracle Database 23ai Free](#oracle-database-23ai-free) +* [Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-system-managed-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-user-defined-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes 
Cluster](#provisioning-system-managed-sharding-topology-with-raft-replication-enabled-in-a-cloud-based-kubernetes-cluster) +* [Connecting to Shard Databases](#connecting-to-shard-databases) +* [Debugging and Troubleshooting](#debugging-and-troubleshooting) + +**Note** You must complete the instructions given in each section, based on your environment, before proceeding to the next section. + +## Prerequisites for running Oracle Sharding Database Controller + +**IMPORTANT:** You must make the changes specified in this section before you proceed to the next section. + +### 1. Kubernetes Cluster: To deploy Oracle Sharding database controller with Oracle Database Operator, you need a Kubernetes Cluster which can be one of the following: + +* A Cloud-based Kubernetes cluster, such as [OCI on Container Engine for Kubernetes (OKE)](https://www.oracle.com/cloud-native/container-engine-kubernetes/) or +* An On-Premises Kubernetes Cluster, such as [Oracle Linux Cloud Native Environment (OLCNE)](https://docs.oracle.com/en/operating-systems/olcne/) cluster. + +To use Oracle Sharding Database Controller, ensure that your system is provisioned with a supported Kubernetes release. Refer to the [Release Status Section](../../README.md#release-status). + +#### Mandatory roles and privileges requirements for Oracle Sharding Database Controller + + Oracle Sharding Database Controller uses Kubernetes objects such as :- + + | Resources | Verbs | + | --- | --- | + | Pods | create delete get list patch update watch | + | Containers | create delete get list patch update watch | + | PersistentVolumeClaims | create delete get list patch update watch | + | Services | create delete get list patch update watch | + | Secrets | create delete get list patch update watch | + | Events | create patch | + +### 2.
Deploy Oracle Database Operator + +To deploy Oracle Database Operator in a Kubernetes cluster, go to the section [Install Oracle DB Operator](../../README.md#install-oracle-db-operator) in the README, and complete the operator deployment before you proceed further. If you have already deployed the operator, then proceed to the next section. + +**IMPORTANT:** Make sure you have completed the steps for [Role Binding for access management](../../README.md#role-binding-for-access-management) as well before installing the Oracle DB Operator. + +### 3. Oracle Database and Global Data Services Docker Images +Choose one of the following deployment options: + + **Use Oracle-Supplied Docker Images:** + The Oracle Sharding Database controller uses Oracle Global Data Services and Oracle Database images to provision the sharding topology. + + You can also download the pre-built Oracle Global Data Services and Oracle Database images from [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). These images are functionally tested and evaluated with various use cases of Oracle Globally Distributed Database topology by deploying on OKE and OLCNE. You can refer to [Oracle Container Registry Images for Oracle Globally Distributed Database Deployment](https://github.com/oracle/db-sharding/blob/master/container-based-sharding-deployment/README.md#oracle-container-registry-images-for-oracle-globally-distributed-database-deployment) + + **Note:** You will need to accept the Agreement from container-registry.oracle.com to be able to pull the pre-built container images.
+ + **OR** + + **Build your own Oracle Database and Global Data Services Docker Images:** + You can build these images using instructions provided on Oracle official GitHub Repositories: + * [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/container-based-sharding-deployment) + * [Oracle Database Image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance) + +After the images are ready, push them to your Docker Images Repository, so that you can pull them during Oracle Globally Distributed Database topology provisioning. + +You can either download the images and push them to your Docker Images Repository, or, if your Kubernetes cluster can reach OCR, you can download these images directly from OCR. + +**Note**: In the Oracle Globally Distributed Database Topology example yaml files, we are using GDS and database images available on [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). + +**Note:** In case you want to use the `Oracle Database 23ai Free` Image for Database and GSM, refer to section [Oracle Database 23ai Free](#oracle-database-23ai-free) for more details. + +### 4. Create a namespace for the Oracle Globally Distributed Database Setup + + Create a Kubernetes namespace named `shns`. All the resources belonging to the Oracle Globally Distributed Database Topology Setup will be provisioned in this namespace named `shns`. For example: + + ```sh + #### Create the namespace + kubectl create ns shns + + #### Check the created namespace + kubectl get ns + ``` + +### 5. Create a Kubernetes secret for the database installation owner for the Oracle Globally Distributed Database Topology Deployment + +**IMPORTANT:** Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. 
If you want to use Prebuilt Oracle Database and Oracle GSM Images from Oracle Container Registry for your deployment, you can refer to [Oracle Container Registry Images for Oracle Globally Distributed Database Deployment](https://github.com/oracle/db-sharding/blob/master/container-based-sharding-deployment/README.md#oracle-container-registry-images-for-oracle-globally-distributed-database-deployment) + +Create a Kubernetes secret named `db-user-pass-rsa` using these steps: [Create Kubernetes Secret](./provisioning/create_kubernetes_secret_for_db_user.md) + +After you have the above prerequisites completed, you can proceed to the next section for your environment to provision the Oracle Database Sharding Topology. + +### 6. Provisioning a Persistent Volume having an Oracle Database Gold Image + +This step is needed when you want to provision a Persistent Volume having an Oracle Database Gold Image for Database Cloning. + +In case of an `OCI OKE` cluster, you can use this Persistent Volume during provisioning Shard Databases by cloning in the same Availability Domain or you can use a Full Backup of this Persistent Volume during provisioning Shard Databases by cloning in different Availability Domains. + +You can refer [here](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) for the steps involved. + +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. So, this step will not be needed if you are deploying Oracle Globally Distributed Database using Oracle 23ai Free Database and GSM Images. + +## Oracle Database 23ai Free + +Please refer to [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) documentation for more details. 
+ +If you want to use Oracle Database 23ai Free Image for Database and GSM for deployment of the Oracle Globally Distributed Database using Sharding Controller in Oracle Database Kubernetes Operator, you need to consider the below points: + +* To deploy using the FREE Database and GSM Image, you will need to add the additional parameter `dbEdition: "free"` to the .yaml file. +* Refer to [Sample Oracle Globally Distributed Database Deployment using Oracle 23ai FREE Database and GSM Images](./provisioning/free/sharding_provisioning_with_free_images.md) for an example. +* For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. +* Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +* Total number of chunks for FREE Database defaults to `12` if `CATALOG_CHUNKS` parameter is not specified. This default value is determined considering limitation of 12 GB of user data on disk for oracle free database. + + +## Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster + +Deploy Oracle Globally Distributed Database Topology with `System-Managed Sharding` on your Cloud based Kubernetes cluster. + +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Globally Distributed Database Topology covered by below examples: + +[1. Provisioning Oracle Globally Distributed Database with System-Managed Sharding without Database Gold Image](./provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md) +[2. 
Provisioning Oracle Globally Distributed Database with System-Managed Sharding with number of chunks specified](./provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md) +[3. Provisioning Oracle Globally Distributed Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md) +[4. Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[5. Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[6. Provisioning Oracle Globally Distributed Database with System-Managed Sharding and send Notification using OCI Notification Service](./provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md) +[7. Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_out_add_shards.md) +[8. Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md) + + +## Provisioning Oracle Globally Distributed Database Topology with User-Defined Sharding in a Cloud-Based Kubernetes Cluster + +Deploy Oracle Globally Distributed Database Topology with `User-Defined Sharding` on your Cloud based Kubernetes cluster. + +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. 
There are multiple use case possible for deploying the Oracle Globally Distributed Database Topology covered by below examples: + +[1. Provisioning Oracle Globally Distributed Database with User-Defined Sharding without Database Gold Image](./provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Globally Distributed Database with User-Defined Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md) +[3. Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[4. Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[5. Provisioning Oracle Globally Distributed Database with User-Defined Sharding and send Notification using OCI Notification Service](./provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md) +[6. Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md) +[7. 
Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md) + + +## Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled in a Cloud-Based Kubernetes Cluster + +Deploy Oracle Globally Distributed Database Topology with `System-Managed Sharding` and with `RAFT Replication` enabled on your Cloud based Kubernetes cluster. + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Globally Distributed Database Topology covered by below examples: + +[1. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled without Database Gold Image](./provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled with number of chunks specified](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md) +[3. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md) +[4. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[5. 
Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[6. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled and send Notification using OCI Notification Service](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md) +[7. Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding and RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md) +[8. Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding and RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md) + +## Connecting to Oracle Globally Distributed Database + +After the Oracle Globally Distributed Database Topology has been provisioned using the Sharding Controller in Oracle Database Kubernetes Operator, you can follow the steps in this document to connect to the Oracle Globally Distributed Database or to the individual Shards: [Database Connectivity](./provisioning/database_connection.md) + +## Debugging and Troubleshooting + +To debug the Oracle Globally Distributed Database Topology provisioned using the Sharding Controller of Oracle Database Kubernetes Operator, follow this document: [Debugging and troubleshooting](./provisioning/debugging.md) + +## Known Issues + +* For both ENTERPRISE and FREE Images, if the Oracle Global Service Manager (GSM) POD is stopped using `crictl stopp` at the worker node level, it leaves GSM in failed state.
The `gdsctl` commands fail with error **GSM-45034: Connection to GDS catalog is not established**. This is because with change, the network namespace is lost when checked from the GSM Pod. +* For both ENTERPRISE and FREE Images, restart of the node running CATALOG using `/sbin/reboot -f` results in **GSM-45076: GSM IS NOT RUNNING**. After you encounter this issue, wait until the `gdsctl` commands start working as the database connection start working. When the stack comes up again after the node restart, you can encounter an unexpected restart of the GSM Pod. +* For both ENTERPRISE and FREE Images, if the CATALOG Database Pod is stopped from the worker node using the command `crictl stopp`, then it can leave the CATALOG in an error state. This error state results in GSM reporting the error message **GSM-45034: Connection to GDS catalog is not established.** +* For both ENTERPRISE and FREE Images, either restart of node running the SHARD Pod using `/sbin/reboot -f` or stopping the Shard Database Pod from the worker node using `crictl stopp` command can leave the shard in an error state. +* For both ENTERPRISE and FREE Images, after force restarts of the node running GSM Pod, the GSM pod restarts multiple times, and then becomes stable. The GSM pod restarts itself because when the worker node comes up, the GSM pod is recreated, but does not obtain DB connection to the Catalog. The Liveness Probe fails which restarts the Pod. Be aware of this issue, and permit the GSM pod to become stable. +* **DDL Propagation from Catalog to Shards:** DDL Propagation from the Catalog Database to the Shard Databases can take several minutes to complete. To see faster propagation of DDLs such as the tablespace set from the Catalog Database to the Shard Databases, Oracle recommends that you set smaller chunk values by using the `CATALOG_CHUNKS` attribute in the .yaml file while creating the Sharded Database Topology. 
+* If the version of `openssl` used to create the encrypted password file for Kubernetes secrets is not compatible with the openssl version of the Oracle Database and Oracle GSM Image, then you can get the error `OS command returned code : 1, returned error : bad magic number` in the logs of the Database or GSM Pod. In this case, during the deployment, openssl will not be able to decrypt the encrypted password file and the deployment will not complete. \ No newline at end of file diff --git a/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md b/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md new file mode 100644 index 00000000..db534575 --- /dev/null +++ b/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md @@ -0,0 +1,48 @@ +# Create kubernetes secret for db user + +Use the following steps to create an encrypted file with a password for the DB User: + +- Create a text file that has the password that you want to use for the DB user. +- Create an RSA key pair using `openssl`. +- Encrypt the text file with a password, using `openssl` with the RSA key pair generated earlier. +- Remove the initial text file. +- Create the Kubernetes Secret named `db-user-pass-rsa` using the encrypted file. + +**IMPORTANT:** Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment.
+ +To understand how to create your own file, use the following example: + +```sh +# Create a directory for files for the secret: +rm -rf /tmp/.secrets/ +mkdir /tmp/.secrets/ + +# Create directories and initialize the variables +RSADIR="/tmp/.secrets" +PRIVKEY="${RSADIR}"/"key.pem" +PUBKEY="${RSADIR}"/"key.pub" +NAMESPACE="shns" +PWDFILE="${RSADIR}"/"pwdfile.txt" +PWDFILE_ENC="${RSADIR}"/"pwdfile.enc" +SECRET_NAME="db-user-pass-rsa" + +# Generate the RSA Key +openssl genrsa -out "${RSADIR}"/key.pem +openssl rsa -in "${RSADIR}"/key.pem -out "${RSADIR}"/key.pub -pubout + +# Create a text file with the password +rm -f $PWDFILE_ENC +echo ORacle_23c > ${RSADIR}/pwdfile.txt + +# Create encrypted file from the text file using the RSA key +openssl pkeyutl -in $PWDFILE -out $PWDFILE_ENC -pubin -inkey $PUBKEY -encrypt + +# Remove the initial text file: +rm -f $PWDFILE + +# Deleting the existing secret if existing +kubectl delete secret $SECRET_NAME -n $NAMESPACE + +# Create the Kubernetes secret in namespace "NAMESPACE" +kubectl create secret generic $SECRET_NAME --from-file=$PWDFILE_ENC --from-file=${PRIVKEY} -n $NAMESPACE +``` diff --git a/doc/sharding/provisioning/database_connection.md b/docs/sharding/provisioning/database_connection.md similarity index 84% rename from doc/sharding/provisioning/database_connection.md rename to docs/sharding/provisioning/database_connection.md index 7f64bbd5..58a54930 100644 --- a/doc/sharding/provisioning/database_connection.md +++ b/docs/sharding/provisioning/database_connection.md @@ -1,10 +1,10 @@ # Database Connectivity -The Oracle Database Sharding Topology deployed by Sharding Controller in Oracle Database Operator has an external IP available for each of the container. +The Oracle Database Sharding Topology deployed by Sharding Controller in Oracle Database Operator has an external IP available for each of the containers. 
## Below is an example setup with connection details -Check the details of the Sharding Topology provisioned using Sharding Controller: +Check the details of the Sharding Topology provisioned by using the Sharding Controller: ```sh $ kubectl get all -n shns @@ -35,10 +35,10 @@ statefulset.apps/shard1 1/1 10d statefulset.apps/shard2 1/1 10d ``` -After you have the external IP address, you can use the services shown below to make the database connection using the above example: +After you have the external IP address, you can use the services shown below to make the database connection. Using the preceding example, that file should look as follows: 1. **Direct connection to the CATALOG Database**: Connect to the service `catalogpdb` on catalog container external IP `xx.xx.xx.116` on port `1521` 2. **Direct connection to the shard Database SHARD1**: Connect to the service `shard1pdb` on catalog container external IP `xx.xx.xx.187` on port `1521` 3. **Direct connection to the shard Database SHARD2**: Connect to the service `shard2pdb` on catalog container external IP `xx.xx.xx.197` on port `1521` -4. **Connection to SHARDED Database for DML activity (INSERT/UPDATE/DELETE)**: Connect to the service `oltp_rw_svc.catalog.oradbcloud` either on primary gsm GSM1 container external IP `xx.xx.xx.38` on port `1522` **or** on standby gsm GSM2 container external IP `xx.xx.xx.66` on port `1522` +4. **Connection to Oracle Globally Distributed Database for DML activity (INSERT/UPDATE/DELETE)**: Connect to the service `oltp_rw_svc.catalog.oradbcloud` either on primary gsm GSM1 container external IP `xx.xx.xx.38` on port `1522` **or** on standby gsm GSM2 container external IP `xx.xx.xx.66` on port `1522` 5. 
**Connection to the catalog database for DDL activity**: Connect to the service `GDS$CATALOG.oradbcloud` on catalog container external IP `xx.xx.xx.116` on port `1521` diff --git a/docs/sharding/provisioning/debugging.md b/docs/sharding/provisioning/debugging.md new file mode 100644 index 00000000..372f104d --- /dev/null +++ b/docs/sharding/provisioning/debugging.md @@ -0,0 +1,50 @@ +# Debugging and Troubleshooting + +When the Oracle Database Sharding Topology is provisioned using the Oracle Database Kubernetes Operator, debugging an issue with the deployment depends on which stage the issue is seen. + +The following sections provide possible issue cases, and the steps to debug such an issue: + +## Failure during the provisioning of Kubernetes Pods + +If the failure occurs during the provisioning, then check the status of the Kubernetes Pod that has failed to be deployed. + +To check the logs of the Pod that has a failure, use the command that follows. In this example, we are checking for failure in provisioning Pod `pod/catalog-0`: + +```sh +kubectl logs -f pod/catalog-0 -n shns +``` + +If the Pod has failed to provision due to an issue with the Docker Image, then you will see the error `Error: ErrImagePull` in the logs displayed by the command. + +If the Pod has not yet been initialized, then use the following command to find the reason for it: + +```sh +kubectl describe pod/catalog-0 -n shns +``` + +If the failure is related to the Cloud Infrastructure, then troubleshoot the infrastructure using the documentation from the Cloud infrastructure provider. + +## Failure in the provisioning of the Oracle Globally Distributed Database + +If the failure occurs after the Kubernetes Pods are created but during the execution of the scripts to create the shard databases, catalog database or the GSM, then you must troubleshoot that failure at the individual Pod level. 
+ +Initially, check the logs of the Kubernetes Pod using the following command (change the name of the Pod in the command with the actual Pod): + +```sh +kubectl logs -f pod/catalog-0 -n shns +``` + +To check the logs at the GSM level, the database level, or at the host level, switch to the corresponding Kubernetes container. For example: + +```sh +kubectl exec -it catalog-0 -n shns /bin/bash +``` + +When you are in the correct Kubernetes container, you can troubleshoot the corresponding component using the alert log, the trace files, and so on, just as you would with a normal Sharding Database Deployment. For more information, see: [Oracle Database Sharding Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/shard/sharding-troubleshooting.html#GUID-629262E5-7910-4690-A726-A565C59BA73E) + + +## Debugging using Database Events + +* You can enable database events as part of the Sharded Database Deployment +* Enable events using `envVars` +* One example of enabling Database Events is [sharding_provisioning_with_db_events.md](./debugging/sharding_provisioning_with_db_events.md) diff --git a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md new file mode 100644 index 00000000..fa73920f --- /dev/null +++ b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md @@ -0,0 +1,40 @@ +# Example of provisioning Oracle Sharded Database along with DB Events set at Database Level + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This example sets a Database Event at the Database Level for Catalog and Shard Databases. + +The sharded database in this example is deployed with System-Managed Sharding type. 
In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. + +**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +This example uses `sharding_provisioning_with_db_events.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Event: `10798 trace name context forever, level 7` set along with `GWM_TRACE level 263` + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `sharding_provisioning_with_db_events.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + + +Use the file: [sharding_provisioning_with_db_events.yaml](./sharding_provisioning_with_db_events.yaml) for this use case as below: + +1. 
Deploy the `sharding_provisioning_with_db_events.yaml` file: + ```sh + kubectl apply -f sharding_provisioning_with_db_events.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` +3. You can confirm the Database event and the tracing enabled in the RDBMS alert log file of the Database. \ No newline at end of file diff --git a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml new file mode 100644 index 00000000..40ad600a --- /dev/null +++ b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml @@ -0,0 +1,68 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md new file mode 100644 index 00000000..0425920b --- /dev/null +++ 
b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md @@ -0,0 +1,43 @@ +# Example of provisioning Oracle Sharded Database with Oracle 23ai FREE Database and GSM Images + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This example uses the Oracle 23ai FREE Database and GSM Images. + +The sharded database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. + +**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +This example uses `sharding_provisioning_with_free_images.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` + + +To get the Oracle 23ai FREE Database and GSM Images: + * The Oracle 23ai FREE RDBMS Image used is `container-registry.oracle.com/database/free:latest`. Check [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) for details. + * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
+ * The Oracle 23ai FREE GSM Image used is `container-registry.oracle.com/database/gsm:latest`. + * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * You need to change `dbImage` and `gsmImage` tag with the images you want to use in your environment in file `sharding_provisioning_with_free_images.yaml`. + +**IMPORTANT:** Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. + + + +Use the file: [sharding_provisioning_with_free_images.yaml](./sharding_provisioning_with_free_images.yaml) for this use case as below: + +1. Deploy the `sharding_provisioning_with_free_images.yaml` file: + ```sh + kubectl apply -f sharding_provisioning_with_free_images.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` \ No newline at end of file diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml new file mode 100644 index 00000000..dadd619a --- /dev/null +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml @@ -0,0 +1,57 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/doc/sharding/provisioning/oraclesi.yaml b/docs/sharding/provisioning/oraclesi.yaml similarity index 78% rename from doc/sharding/provisioning/oraclesi.yaml rename to docs/sharding/provisioning/oraclesi.yaml index ffc5734b..cac70ffa 100644 --- a/doc/sharding/provisioning/oraclesi.yaml +++ b/docs/sharding/provisioning/oraclesi.yaml @@ -1,14 +1,14 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: oshard-gold-image-pvc19c + name: oshard-gold-image-pvc21c namespace: shns labels: - app: oshard19cdb-dep + app: oshard21cdb-dep spec: accessModes: - ReadWriteOnce @@ -18,28 +18,28 @@ spec: storageClassName: oci selector: matchLabels: - failure-domain.beta.kubernetes.io/zone: "EU-FRANKFURT-1-AD-1" + topology.kubernetes.io/zone: "PHX-AD-1" --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: oshard19cdb + name: oshard21cdb namespace: shns labels: - app: oshard19cdb-dep + app: oshard21cdb-dep spec: selector: matchLabels: - app: oshard19cdb-dep + app: oshard21cdb-dep serviceName: gold-shard template: metadata: labels: - app: oshard19cdb-dep + app: oshard21cdb-dep spec: containers: - image: container-registry.oracle.com/database/enterprise:latest - name: oshard19cdb + name: oshard21cdb ports: - containerPort: 1521 name: db1-dbport @@ -68,17 +68,17 @@ spec: volumes: - name: data persistentVolumeClaim: - claimName: oshard-gold-image-pvc19c + claimName: oshard-gold-image-pvc21c - name: dshm emptyDir: medium: Memory nodeSelector: - failure-domain.beta.kubernetes.io/zone: "EU-FRANKFURT-1-AD-1" + topology.kubernetes.io/zone: "PHX-AD-1" --- apiVersion: v1 kind: Service metadata: - name: oshard19cdb + name: oshard21cdb namespace: shns spec: ports: @@ -95,5 +95,4 @@ spec: port: 6234 targetPort: db1-onsrport selector: - app: oshard19cdb-dep - + app: oshard21cdb-dep \ No newline at end of file diff --git a/doc/sharding/provisioning/oraclesi_pvc_commented.yaml b/docs/sharding/provisioning/oraclesi_pvc_commented.yaml similarity index 67% rename from doc/sharding/provisioning/oraclesi_pvc_commented.yaml rename to docs/sharding/provisioning/oraclesi_pvc_commented.yaml index a6facda0..43b50a5e 100644 --- a/doc/sharding/provisioning/oraclesi_pvc_commented.yaml +++ b/docs/sharding/provisioning/oraclesi_pvc_commented.yaml @@ -1,46 +1,45 @@ # -# Copyright (c) 2021, Oracle and/or its affiliates. 
+# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # - -# apiVersion: v1 -# kind: PersistentVolumeClaim -# metadata: -# name: oshard-gold-image-pvc19c -# namespace: shns -# labels: -# app: oshard19cdb-dep -# spec: -# accessModes: -# - ReadWriteOnce -# resources: -# requests: -# storage: 50Gi -# storageClassName: oci -# selector: -# matchLabels: -# failure-domain.beta.kubernetes.io/zone: "EU-FRANKFURT-1-AD-1" -# --- +#apiVersion: v1 +#kind: PersistentVolumeClaim +#metadata: +# name: oshard-gold-image-pvc21c +# namespace: shns +# labels: +# app: oshard21cdb-dep +#spec: +# accessModes: +# - ReadWriteOnce +# resources: +# requests: +# storage: 50Gi +# storageClassName: oci +# selector: +# matchLabels: +# topology.kubernetes.io/zone: "PHX-AD-1" +#--- apiVersion: apps/v1 kind: StatefulSet metadata: - name: oshard19cdb + name: oshard21cdb namespace: shns labels: - app: oshard19cdb-dep + app: oshard21cdb-dep spec: selector: matchLabels: - app: oshard19cdb-dep + app: oshard21cdb-dep serviceName: gold-shard template: metadata: labels: - app: oshard19cdb-dep + app: oshard21cdb-dep spec: containers: - image: container-registry.oracle.com/database/enterprise:latest - name: oshard19cdb + name: oshard21cdb ports: - containerPort: 1521 name: db1-dbport @@ -69,17 +68,17 @@ spec: volumes: - name: data persistentVolumeClaim: - claimName: oshard-gold-image-pvc19c + claimName: oshard-gold-image-pvc21c - name: dshm emptyDir: medium: Memory nodeSelector: - failure-domain.beta.kubernetes.io/zone: "EU-FRANKFURT-1-AD-1" + topology.kubernetes.io/zone: "PHX-AD-1" --- apiVersion: v1 kind: Service metadata: - name: oshard19cdb + name: oshard21cdb namespace: shns spec: ports: @@ -96,5 +95,4 @@ spec: port: 6234 targetPort: db1-onsrport selector: - app: oshard19cdb-dep - + app: oshard21cdb-dep \ No newline at end of file diff --git 
a/doc/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md b/docs/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md similarity index 95% rename from doc/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md rename to docs/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md index 581fc62c..0a453c15 100644 --- a/doc/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md +++ b/docs/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md @@ -2,14 +2,14 @@ In this use case, a Persistent Volume with a Oracle Database Gold Image is created. - This is required when you do not already have a Persistent Volume with a Database Gold Image from which you can clone database to save time while deploying Oracle Sharding topology using Oracle Sharding controller. + This is required when you do not already have a Persistent Volume with a Database Gold Image from which you can clone database to save time while deploying Oracle Globally Distributed Database topology using Oracle Sharding controller. This example uses file `oraclesi.yaml` to provision a single instance Oracle Database: * A Persistent Volume Claim * Repository location for Database Docker Image: `image: container-registry.oracle.com/database/enterprise:latest` * Namespace: `shns` -* Tag `nodeSelector` to deploy the Single Oracle Database in AD `EU-FRANKFURT-1-AD-1` +* Tag `nodeSelector` to deploy the Single Oracle Database in AD `PHX-AD-1` In this example, we are using pre-built Oracle Database image available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md new file mode 100644 index 00000000..9ffebad9 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -0,0 +1,58 @@ +# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs) + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. + +This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. + +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup across ADs. + +NOTE: + +* Cloning from Block Volume Backup in OCI enables the new Persistent Volumes to be created in other ADs. +* To specify the AD where you want to provision the database Pod, use the tag `nodeSelector` and the POD will be provisioned in a node running in that AD. +* To specify GSM containers, you can also use the tag `nodeSelector` to specify the AD. 
+* Before you can provision with the Gold Image, you need the OCID of the Persistent Volume that has the Oracle Database Gold Image. + +1. Check the OCID of the Persistent Volume provisioned for the Oracle Database Gold Image: + ```sh + kubectl get pv -n shns + ``` +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `snr_ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. +* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` +* `RAFT Replication` enabled + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone_across_ads.yaml`. 
+ * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +Use the file: [snr_ssharding_shard_prov_clone_across_ads.yaml](./snr_ssharding_shard_prov_clone_across_ads.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_clone_across_ads.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_clone_across_ads.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md new file mode 100644 index 00000000..054d760e --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -0,0 +1,54 @@ +# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD) + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. + +This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. + +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. + +**NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. + +1. Check the OCID of the Persistent Volume provisioned earlier using below command: + + ```sh + kubectl get pv -n shns + ``` + +2. 
This example uses `snr_ssharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` +* `RAFT Replication` enabled + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone.yaml`. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. 
+ +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + +Use the file: [snr_ssharding_shard_prov_clone.yaml](./snr_ssharding_shard_prov_clone.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_clone.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_clone.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md new file mode 100644 index 00000000..253d099b --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md @@ -0,0 +1,44 @@ +# Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. 
+ +**NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +By default, the System-Managed with RAFT Replication deploys the Sharded Database with 360 chunks per Shard Database (because there are 3 chunks created for each replication unit). In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. + +This example uses `snr_ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 120 chunks per shard) +* Namespace: `shns` +* `RAFT Replication` enabled + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov.yaml`. 
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + + +Use the file: [snr_ssharding_shard_prov_chunks.yaml](./snr_ssharding_shard_prov_chunks.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_chunks.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_chunks.yaml + ``` +1. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md new file mode 100644 index 00000000..e017f6a9 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md @@ -0,0 +1,48 @@ +# Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with System-Managed with RAFT Replication is deployed using Oracle Sharding controller. 
+ +This example uses `snr_ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Tags `memory` and `cpu` to control the Memory and CPU of the PODs +* Additional tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level +* `RAFT Replication` enabled + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_memory_cpu.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. 
+ +**NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. + +Use the YAML file [snr_ssharding_shard_prov_memory_cpu.yaml](./snr_ssharding_shard_prov_memory_cpu.yaml). + +1. Deploy the `snr_ssharding_shard_prov_memory_cpu.yaml` file: + + ```sh + kubectl apply -f snr_ssharding_shard_prov_memory_cpu.yaml + ``` + +1. Check the details of a POD. For example: To check the details of Pod `shard1-0`: + + ```sh + kubectl describe pod/shard1-0 -n shns + ``` +3. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md new file mode 100644 index 00000000..3b8d1665 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md @@ -0,0 +1,88 @@ +# Provisioning System managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
+ +This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. + +This example uses `snr_ssharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. +* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` +* Configmap to send notification email when a particular operation is completed. For example: When a shard is added. +* `RAFT Replication` enabled + +**NOTE:** + +* The notification will be sent using a configmap created with the credentials of the OCI user account in this use case. + +We will create a topic in Notification Service of the OCI Console and use its OCID. + +To do this: + +1. Create a `configmap_data.txt` file, such as the following, which has the OCI User details that will be used to send notfication: + + ```sh + user=ocid1.user.oc1........fx7omxfq + fingerprint=fa:18:98:...............:8a + tenancy=ocid1.tenancy.oc1..aaaa.......orpn7inq + region=us-phoenix-1 + topicid=ocid1.onstopic.oc1.phx.aaa............6xrq + ``` +2. Create a configmap using the below command using the file created above: + ```sh + kubectl create configmap onsconfigmap --from-file=./configmap_data.txt -n shns + ``` + +3. 
Create a key file `privatekey` having the PEM key of the OCI user being used to send notification: + ```sh + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCXYxA0DJvEwtVR + +o4OxrunL3L2NZJRADTFR+TDHqrNF1JwbaFBizSdL+EXbxQW1faZs5lXZ/sVmQF9 + . + . + . + zn/xWC0FzXGRzfvYHhq8XT3omf6L47KqIzqo3jDKdgvVq4u+lb+fXJlhj6Rwi99y + QEp36HnZiUxAQnR331DacN+YSTE+vpzSwZ38OP49khAB1xQsbiv1adG7CbNpkxpI + nS7CkDLg4Hcs4b9bGLHYJVY= + -----END PRIVATE KEY----- + ``` +4. Use the key file `privatekey` to create a Kubernetes secret in namespace `shns`: + + ```sh + kubectl create secret generic my-secret --from-file=./privatekey -n shns + ``` + +5. Use this command to check details of the secret that you created: + + ```sh + kubectl describe secret my-secret -n shns + ``` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `snr_ssharding_shard_prov_send_notification.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + +Use the file: [snr_ssharding_shard_prov_send_notification.yaml](./snr_ssharding_shard_prov_send_notification.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_send_notification.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_send_notification.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md new file mode 100644 index 00000000..91caddf1 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md @@ -0,0 +1,41 @@ +# Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
+ +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. + +**NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +This example uses `snr_ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* `RAFT Replication` enabled + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
* Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. + + +Use the file: [snr_ssharding_shard_prov.yaml](./snr_ssharding_shard_prov.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov.yaml + ``` +1. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md new file mode 100644 index 00000000..fc093654 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md @@ -0,0 +1,51 @@ +# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with System-Managed with RAFT Replication enabled provisioned using Oracle Database Sharding controller. + +**NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. 
In this use case, the existing Oracle Database sharding topology has: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* `RAFT Replication` enabled + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `snr_ssharding_shard_prov_delshard.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. + +NOTE: Use tag `isDelete: enable` to delete the shard you want. + +This use case deletes the shard `shard4` from the above Sharding Topology. + +Use the file: [snr_ssharding_shard_prov_delshard.yaml](./snr_ssharding_shard_prov_delshard.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_delshard.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_delshard.yaml + ``` +2. 
Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + +**NOTE:** After you apply `snr_ssharding_shard_prov_delshard.yaml`, the change may not be visible immediately. When the shard is removed, first the chunks will be moved out of that shard that is going to be deleted. + +To monitor the chunk movement, use the following command: + +```sh +# Switch to the primary GSM Container: +kubectl exec -i -t gsm1-0 -n shns /bin/bash + +# Check the status of the chunks and repeat to observe the chunk movement: +gdsctl config chunks +``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md new file mode 100644 index 00000000..3461bf13 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md @@ -0,0 +1,38 @@ +# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled + +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed with RAFT Replication enabled provisioned earlier using Oracle Database Sharding controller. 
+ +In this use case, the existing Oracle Database sharding topology is having: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* `RAFT Replication` enabled + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_extshard.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. + +Use the file: [snr_ssharding_shard_prov_extshard.yaml](./snr_ssharding_shard_prov_extshard.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_extshard.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_extshard.yaml + ``` +2. 
Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard4-0": + kubectl logs -f pod/shard4-0 -n shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml new file mode 100644 index 00000000..53b93a0d --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml @@ -0,0 +1,58 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git 
a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml new file mode 100644 index 00000000..0230eac2 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml @@ -0,0 +1,61 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + envVars: + - name: "CATALOG_CHUNKS" + value: "120" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml new file mode 100644 index 
00000000..fcc18da0 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml @@ -0,0 +1,83 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq 
+ imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml new file mode 100644 index 00000000..0663f8a5 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml @@ -0,0 +1,91 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + - name: gsm2 
+ imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml new file mode 100644 index 00000000..ce194246 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml @@ -0,0 +1,69 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard4 + isDelete: enable + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml new file mode 100644 index 00000000..8848b8c7 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml @@ -0,0 +1,68 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard4 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/doc/sharding/provisioning/shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml similarity index 51% rename from doc/sharding/provisioning/shard_prov_memory_cpu.yaml rename to docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml index a542ebab..dce4ba29 100644 --- a/doc/sharding/provisioning/shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml @@ -1,8 +1,9 
@@ # -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +--- +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -20,6 +21,9 @@ spec: value: "600" - name: "INIT_PGA_SIZE" value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary - name: shard2 storageSizeInGb: 50 resources: @@ -31,6 +35,23 @@ spec: value: "600" - name: "INIT_PGA_SIZE" value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary catalog: - name: catalog storageSizeInGb: 50 @@ -38,30 +59,31 @@ spec: requests: memory: "1000Mi" cpu: "1000m" + imagePullPolicy: "Always" gsm: - name: gsm1 + imagePullPolicy: "Always" storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" + region: primary - name: gsm2 + imagePullPolicy: "Always" storageSizeInGb: 50 - replicas: 1 - envVars: - - name: "SERVICE1_PARAMS" - value: "service_name=oltp_rw_svc;service_role=primary" - - name: "SERVICE2_PARAMS" - value: "service_name=oltp_ro_svc;service_role=primary" + region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred - scriptsLocation: "set -ex;curl 
https://codeload.github.com/oracle/db-sharding/tar.gz/master | tar -xz --strip=4 db-sharding-master/docker-based-sharding-deployment/dockerfiles/21.3.0/scripts; cp -i -r scripts/* /opt/oracle/scripts/sharding/scripts/;cp -i -r scripts/*py /opt/oracle/scripts/sharding" - secret: db-user-pass - isExternalSvc: false + dbEdition: "free" + replicationType: "native" + isExternalSvc: False isDeleteOraPvc: True - namespace: shns + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml new file mode 100644 index 00000000..2b410e8b --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml @@ -0,0 +1,85 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: 
container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + nsConfigMap: onsconfigmap + nsSecret: my-secret + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md new file mode 100644 index 00000000..4d24655d --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -0,0 +1,56 @@ +# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this test case, you provision the Oracle Database sharding topology with System-Managed Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. + +This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. + +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup across ADs. 
+ +NOTE: + +* Cloning from Block Volume Backup in OCI enables the new Persistent Volumes to be created in other ADs. +* To specify the AD where you want to provision the database Pod, use the tag `nodeSelector` and the POD will be provisioned in a node running in that AD. +* To specify GSM containers, you can also use the tag `nodeSelector` to specify the AD. +* Before you can provision with the Gold Image, you need the OCID of the Persistent Volume that has the Oracle Database Gold Image. + +1. Check the OCID of the Persistent Volume provisioned for the Oracle Database Gold Image: + ```sh + kubectl get pv -n shns + ``` +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. +* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. + + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
+ +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `ssharding_shard_prov_clone_across_ads.yaml`. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be the same. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer to [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. + +Use the file: [ssharding_shard_prov_clone_across_ads.yaml](./ssharding_shard_prov_clone_across_ads.yaml) for this use case as below: + +1. Deploy the `ssharding_shard_prov_clone_across_ads.yaml` file: + ```sh + kubectl apply -f ssharding_shard_prov_clone_across_ads.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod.
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md new file mode 100644 index 00000000..5e44a601 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -0,0 +1,51 @@ +# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this case, the database is created automatically by cloning from an existing Oracle Database Gold Image during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology is deployed using Oracle Sharding controller. + +This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. + +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup. + +**NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. + +1. Check the OCID of the Persistent Volume provisioned earlier using below command: + + ```sh + kubectl get pv -n shns + ``` + +2. 
This example uses `ssharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `ssharding_shard_prov_clone.yaml`. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be the same.
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +Use the file: [ssharding_shard_prov_clone.yaml](./ssharding_shard_prov_clone.yaml) for this use case as below: + +1. Deploy the `ssharding_shard_prov_clone.yaml` file: + ```sh + kubectl apply -f ssharding_shard_prov_clone.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md new file mode 100644 index 00000000..649fc7c4 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md @@ -0,0 +1,41 @@ +# Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. 
+ +**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +By default, the System-Managed Sharding deploys the Sharded Database with 120 chunks per Shard Database. If, for example, we have three shards in the Sharded Database, it will be total of 360 chunks. In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. + +This example uses `ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 40 chunks per shard) +* Namespace: `shns` + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov.yaml`. 
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + + +Use the file: [ssharding_shard_prov_chunks.yaml](./ssharding_shard_prov_chunks.yaml) for this use case as below: + +1. Deploy the `ssharding_shard_prov_chunks.yaml` file: + ```sh + kubectl apply -f ssharding_shard_prov_chunks.yaml + ``` +1. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md new file mode 100644 index 00000000..d284bf9b --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md @@ -0,0 +1,45 @@ +# Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
+ +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. + +This example uses `ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Tags `memory` and `cpu` to control the Memory and CPU of the PODs +* Additional tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_memory_cpu.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + + **NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. + +Use the YAML file [ssharding_shard_prov_memory_cpu.yaml](./ssharding_shard_prov_memory_cpu.yaml). + +1. Deploy the `ssharding_shard_prov_memory_cpu.yaml` file: + + ```sh + kubectl apply -f ssharding_shard_prov_memory_cpu.yaml + ``` + +1. Check the details of a POD. For example: To check the details of Pod `shard1-0`: + + ```sh + kubectl describe pod/shard1-0 -n shns + ``` +3. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/doc/sharding/provisioning/provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md similarity index 62% rename from doc/sharding/provisioning/provisioning_with_notification_using_oci_notification.md rename to docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md index 7df7bc05..e77718f4 100644 --- a/doc/sharding/provisioning/provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md @@ -1,15 +1,17 @@ -# Provisioning Oracle Database Sharding Topology and Send Notification Using OCI Notification Service +# Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service -This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This example uses `shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. 
+ +This example uses `ssharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Two sharding Pods: `shard1` and `shard2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. -* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea` +* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` * Configmap to send notification email when a particular operation is completed. For example: When a shard is added. **NOTE:** @@ -26,8 +28,8 @@ To do this: user=ocid1.user.oc1........fx7omxfq fingerprint=fa:18:98:...............:8a tenancy=ocid1.tenancy.oc1..aaaa.......orpn7inq - region=eu-frankfurt-1 - topicid=ocid1.onstopic.oc1.eu-frankfurt-1.aaa............6xrq + region=us-phoenix-1 + topicid=ocid1.onstopic.oc1.phx.aaa............6xrq ``` 2. Create a configmap using the below command using the file created above: ```sh @@ -61,14 +63,18 @@ To do this: In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `shard_prov.yaml`. 
- * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../README.md#3-oracle-database-and-global-data-services-docker-images) + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_send_notification.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. -Use the file: [shard_prov_send_notification.yaml](./shard_prov_send_notification.yaml) for this use case as below: +Use the file: [ssharding_shard_prov_send_notification.yaml](./ssharding_shard_prov_send_notification.yaml) for this use case as below: -1. Deploy the `shard_prov_send_notification.yaml` file: +1. Deploy the `ssharding_shard_prov_send_notification.yaml` file: ```sh - kubectl apply -f shard_prov_send_notification.yaml + kubectl apply -f ssharding_shard_prov_send_notification.yaml ``` 2. 
Check the status of the deployment: ```sh diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md new file mode 100644 index 00000000..1ecb0ec1 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md @@ -0,0 +1,37 @@ +# Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. + +**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +This example uses `ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
+ * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +Use the file: [ssharding_shard_prov.yaml](./ssharding_shard_prov.yaml) for this use case as below: + +1. Deploy the `ssharding_shard_prov.yaml` file: + ```sh + kubectl apply -f ssharding_shard_prov.yaml + ``` +1. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` \ No newline at end of file diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md new file mode 100644 index 00000000..889de98c --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md @@ -0,0 +1,48 @@ +# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with System-Managed Sharding provisioned using Oracle Database Sharding controller. + +**NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. + +In this use case, the existing database Sharding is having: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* One Catalog Pod: `catalog` +* Namespace: `shns` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_delshard.yaml`. 
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. + +NOTE: Use tag `isDelete: enable` to delete the shard you want. + +This use case deletes the shard `shard4` from the above Sharding Topology. + +Use the file: [ssharding_shard_prov_delshard.yaml](./ssharding_shard_prov_delshard.yaml) for this use case as below: + +1. Deploy the `ssharding_shard_prov_delshard.yaml` file: + ```sh + kubectl apply -f ssharding_shard_prov_delshard.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + +**NOTE:** After you apply `ssharding_shard_prov_delshard.yaml`, the change may not be visible immediately. Before the shard is removed, the chunks are first moved out of the shard that is going to be deleted.
+ +To monitor the chunk movement, use the following command: + +```sh +# Switch to the primary GSM Container: +kubectl exec -i -t gsm1-0 -n shns /bin/bash + +# Check the status of the chunks and repeat to observe the chunk movement: +gdsctl config chunks +``` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md new file mode 100644 index 00000000..5086d887 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md @@ -0,0 +1,35 @@ +# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed Sharding provisioned earlier using Oracle Database Sharding controller. + +In this use case, the existing Oracle Database sharding topology is having: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_extshard.yaml`. 
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. + +Use the file: [ssharding_shard_prov_extshard.yaml](./ssharding_shard_prov_extshard.yaml) for this use case as below: + +1. Deploy the `ssharding_shard_prov_extshard.yaml` file: + ```sh + kubectl apply -f ssharding_shard_prov_extshard.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard4-0": + kubectl logs -f pod/shard4-0 -n shns diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml new file mode 100644 index 00000000..1bdb9ce5 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml @@ -0,0 +1,56 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml new file mode 100644 index 00000000..868e8bc1 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml @@ -0,0 +1,59 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + envVars: + - name: "CATALOG_CHUNKS" + value: "120" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml new file mode 100644 index 00000000..3cafeba7 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml @@ -0,0 +1,81 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + 
dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml new file mode 100644 index 00000000..d7ec6365 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml @@ -0,0 +1,89 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + 
"failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml new file mode 100644 index 00000000..1017a9d5 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml @@ -0,0 +1,67 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard4 + isDelete: enable + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml new file mode 100644 index 00000000..d23052fb --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml @@ -0,0 +1,66 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard4 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml new file mode 100644 index 00000000..075919f7 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml @@ -0,0 +1,87 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml 
b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml new file mode 100644 index 00000000..aea6fc7c --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml @@ -0,0 +1,84 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + 
pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + nsConfigMap: onsconfigmap + nsSecret: my-secret + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + diff --git a/doc/sharding/provisioning/provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md similarity index 51% rename from doc/sharding/provisioning/provisioning_by_cloning_db_from_gold_image_across_ads.md rename to docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index 75a6f743..e55df2de 100644 --- a/doc/sharding/provisioning/provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,6 +1,8 @@ -# Provisioning Oracle Database Sharding Topology by Cloning the Database from Your Own Database Gold Image Across Availability Domains (ADs) +# Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) -In this test case, you provision the Oracle Database sharding topology while provisioning the Catalog and Shard 
Databases by cloning from an existing Oracle Database Gold Image created earlier. +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this test case, you provision the Oracle Database sharding topology with User Defined Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. @@ -17,25 +19,32 @@ NOTE: ```sh kubectl get pv -n shns ``` -2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `udsharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Two sharding Pods: `shard1` and `shard2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. 
-* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.eu-frankfurt-1.abtheljtjlc7oce3sgq55vnskb4sjdip5sdaighm54hpmlcg7avgc76pjbea` +* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` +* User Defined Sharding is specified using `shardingType: USER` + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `shard_prov_clone_across_ads.yaml`. - * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../README.md#3-oracle-database-and-global-data-services-docker-images) + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_clone_across_ads.yaml`. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. -Use the file: [shard_prov_clone_across_ads.yaml](./shard_prov_clone_across_ads.yaml) for this use case as below: +Use the file: [udsharding_shard_prov_clone_across_ads.yaml](./udsharding_shard_prov_clone_across_ads.yaml) for this use case as below: -1. Deploy the `shard_prov_clone_across_ads.yaml` file: +1. Deploy the `udsharding_shard_prov_clone_across_ads.yaml` file: ```sh - kubectl apply -f shard_prov_clone_across_ads.yaml + kubectl apply -f udsharding_shard_prov_clone_across_ads.yaml ``` 2. Check the status of the deployment: ```sh diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md new file mode 100644 index 00000000..edd9c484 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -0,0 +1,52 @@ +# Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
+ +In this case, the database is created automatically by cloning from an existing Oracle Database Gold Image during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology is deployed using Oracle Sharding controller. + +This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. + +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. + +**NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. + +1. Check the OCID of the Persistent Volume provisioned earlier using below command: + + ```sh + kubectl get pv -n shns + ``` + +2. This example uses `udsharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` +* User Defined Sharding is specified using `shardingType: USER` + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. 
+ +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `udsharding_shard_prov_clone.yaml`. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be the same. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + +Use the file: [udsharding_shard_prov_clone.yaml](./udsharding_shard_prov_clone.yaml) for this use case as below: + +1. Deploy the `udsharding_shard_prov_clone.yaml` file: + ```sh + kubectl apply -f udsharding_shard_prov_clone.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod.
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md new file mode 100644 index 00000000..638b7124 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md @@ -0,0 +1,46 @@ +# Provisioning Oracle Sharded Database with User Defined Sharding with additional control on resources like Memory and CPU allocated to Pods + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with User Defined Sharding is deployed using Oracle Sharding controller. 
+ +This example uses `udsharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Tags `memory` and `cpu` to control the Memory and CPU of the PODs +* Additional tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level +* User Defined Sharding is specified using `shardingType: USER` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_memory_cpu.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. 
+ +**NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. + +Use the YAML file [udsharding_shard_prov_memory_cpu.yaml](./udsharding_shard_prov_memory_cpu.yaml). + +1. Deploy the `udsharding_shard_prov_memory_cpu.yaml` file: + + ```sh + kubectl apply -f udsharding_shard_prov_memory_cpu.yaml + ``` + +1. Check the details of a POD. For example: To check the details of Pod `shard1-0`: + + ```sh + kubectl describe pod/shard1-0 -n shns + ``` +3. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md new file mode 100644 index 00000000..fe1ca870 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md @@ -0,0 +1,86 @@ +# Provisioning Oracle Sharded Database with User Defined Sharding and send Notification using OCI Notification Service + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. 
+
+This example uses `udsharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with:
+
+* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2`
+* Three sharding Pods: `shard1`, `shard2` and `shard3`
+* One Catalog Pod: `catalog`
+* Namespace: `shns`
+* Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier.
+* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq`
+* Configmap to send notification email when a particular operation is completed. For example: When a shard is added.
+* User Defined Sharding is specified using `shardingType: USER`
+
+**NOTE:**
+
+* The notification will be sent using a configmap created with the credentials of the OCI user account in this use case.
+
+We will create a topic in Notification Service of the OCI Console and use its OCID.
+
+To do this:
+
+1. Create a `configmap_data.txt` file, such as the following, which has the OCI User details that will be used to send notification:
+
+    ```sh
+    user=ocid1.user.oc1........fx7omxfq
+    fingerprint=fa:18:98:...............:8a
+    tenancy=ocid1.tenancy.oc1..aaaa.......orpn7inq
+    region=us-phoenix-1
+    topicid=ocid1.onstopic.oc1.phx.aaa............6xrq
+    ```
+2. Create a configmap using the below command using the file created above:
+    ```sh
+    kubectl create configmap onsconfigmap --from-file=./configmap_data.txt -n shns
+    ```
+
+3. Create a key file `privatekey` having the PEM key of the OCI user being used to send notification:
+    ```sh
+    -----BEGIN PRIVATE KEY-----
+    MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCXYxA0DJvEwtVR
+    +o4OxrunL3L2NZJRADTFR+TDHqrNF1JwbaFBizSdL+EXbxQW1faZs5lXZ/sVmQF9
+    .
+    .
+    . 
+ zn/xWC0FzXGRzfvYHhq8XT3omf6L47KqIzqo3jDKdgvVq4u+lb+fXJlhj6Rwi99y + QEp36HnZiUxAQnR331DacN+YSTE+vpzSwZ38OP49khAB1xQsbiv1adG7CbNpkxpI + nS7CkDLg4Hcs4b9bGLHYJVY= + -----END PRIVATE KEY----- + ``` +4. Use the key file `privatekey` to create a Kubernetes secret in namespace `shns`: + + ```sh + kubectl create secret generic my-secret --from-file=./privatekey -n shns + ``` + +5. Use this command to check details of the secret that you created: + + ```sh + kubectl describe secret my-secret -n shns + ``` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_send_notification.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
+ +Use the file: [udsharding_shard_prov_send_notification.yaml](./udsharding_shard_prov_send_notification.yaml) for this use case as below: + +1. Deploy the `udsharding_shard_prov_send_notification.yaml` file: + ```sh + kubectl apply -f udsharding_shard_prov_send_notification.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md new file mode 100644 index 00000000..b0378e04 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md @@ -0,0 +1,38 @@ +# Provisioning Oracle Sharded Database with User Defined Sharding without Database Gold Image + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with User Defined Sharding is deployed using Oracle Sharding controller. + +**NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
+ +This example uses `udsharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* User Defined Sharding is specified using `shardingType: USER` + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +Use the file: [udsharding_shard_prov.yaml](./udsharding_shard_prov.yaml) for this use case as below: + +1. Deploy the `udsharding_shard_prov.yaml` file: + ```sh + kubectl apply -f udsharding_shard_prov.yaml + ``` +1. 
Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md new file mode 100644 index 00000000..673e455e --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md @@ -0,0 +1,66 @@ +# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with User Defined Sharding + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with User Defined Sharding provisioned using Oracle Database Sharding controller. + +In this use case, the existing database Sharding is having: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* User Defined Sharding is specified using `shardingType: USER` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
+  * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `udsharding_shard_prov_delshard.yaml`.
+  * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images)
+  * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file.
+  * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment.
+
+**NOTE:** Use tag `isDelete: enable` to delete the shard you want.
+
+This use case deletes the shard `shard4` from the above Sharding Topology.
+
+Use the file: [udsharding_shard_prov_delshard.yaml](./udsharding_shard_prov_delshard.yaml) for this use case as below:
+
+1. Move out the chunks from the shard to be deleted to another shard. For example, in the current case, before deleting the `shard4`, if you want to move the chunks from `shard4` to `shard2`, then you can run the below `kubectl` command where `/u01/app/oracle/product/23ai/gsmhome_1` is the GSM HOME:
+    ```sh
+    kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "move chunk -chunk all -source shard4_shard4pdb -target shard2_shard2pdb"
+    ```
+2. Confirm the shard to be deleted (`shard4` in this case) does not have any chunks, using the below command:
+    ```sh
+    kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "config chunks"
+    ```
+   If there is no chunk present in the shard to be deleted, you can move to the next step.
+
+3. 
Apply the `udsharding_shard_prov_delshard.yaml` file:
+    ```sh
+    kubectl apply -f udsharding_shard_prov_delshard.yaml
+    ```
+4. Check the status of the deployment:
+    ```sh
+    # Check the status of the Kubernetes Pods:
+    kubectl get all -n shns
+    ```
+
+**NOTE:**
+- After you apply `udsharding_shard_prov_delshard.yaml`, the change may not be visible immediately and it may take some time for the delete operation to complete.
+- If the shard that you are trying to delete still has chunks, then you will see a message like the below in the logs of the Oracle Database Operator Pod.
+  ```sh
+  INFO controllers.database.ShardingDatabase manual intervention required
+  ```
+  In this case, you will need to first move out the chunks from the shard to be deleted using Step 2 above and then apply the file in Step 3 to delete that shard.
+
+To check the status, use the following command:
+  ```sh
+  # Switch to the primary GSM Container:
+  kubectl exec -i -t gsm1-0 -n shns /bin/bash
+
+  # Check the status of the shards:
+  gdsctl config shard
+
+  # Check the status of the chunks:
+  gdsctl config chunks
+  ```
diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md
new file mode 100644
index 00000000..abdc53ff
--- /dev/null
+++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md
@@ -0,0 +1,36 @@
+# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with User Defined Sharding
+
+**IMPORTANT:** Make sure you have completed the steps for [Prerequisites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller.
+
+This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with User Defined Sharding provisioned earlier using Oracle Database Sharding controller. 
+ +In this use case, the existing Oracle Database sharding topology is having: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* User Defined Sharding is specified using `shardingType: USER` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_extshard.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + +This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. + +Use the file: [udsharding_shard_prov_extshard.yaml](./udsharding_shard_prov_extshard.yaml) for this use case as below: + +1. Deploy the `udsharding_shard_prov_extshard.yaml` file: + ```sh + kubectl apply -f udsharding_shard_prov_extshard.yaml + ``` +2. 
Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard4-0": + kubectl logs -f pod/shard4-0 -n shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml new file mode 100644 index 00000000..c9f20eb3 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml @@ -0,0 +1,57 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace2 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace3 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + shardingType: USER + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git 
a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml new file mode 100644 index 00000000..d7e5ce78 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml @@ -0,0 +1,82 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + shardSpace: sspace1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + shardSpace: sspace2 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + shardSpace: sspace3 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + 
pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + shardingType: USER + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml new file mode 100644 index 00000000..ae02c7fe --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml @@ -0,0 +1,90 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + shardSpace: sspace1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + shardSpace: sspace2 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + shardSpace: sspace3 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + - name: gsm2 + 
imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + shardingType: USER + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml new file mode 100644 index 00000000..d83bf546 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml @@ -0,0 +1,68 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace2 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace3 + shardRegion: primary + - name: shard4 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace4 + shardRegion: primary + isDelete: enable + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace5 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + shardingType: USER + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml new file mode 100644 index 00000000..7526feb7 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml @@ -0,0 +1,66 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace2 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace3 + shardRegion: primary + - name: shard4 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace4 + shardRegion: primary + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardSpace: sspace5 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + shardingType: USER + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml new file mode 100644 index 00000000..8be81d39 --- /dev/null +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml @@ -0,0 +1,88 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardSpace: sspace1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardSpace: sspace2 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardSpace: sspace3 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + shardingType: USER + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml new file mode 100644 index 00000000..4dda6db9 --- /dev/null +++ 
b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml @@ -0,0 +1,84 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + shardSpace: sspace1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + shardSpace: sspace2 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + shardSpace: sspace3 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: 
ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 + gsmImagePullSecret: ocr-reg-cred + shardingType: USER + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + nsConfigMap: onsconfigmap + nsSecret: my-secret + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sidb/PREREQUISITES.md b/docs/sidb/PREREQUISITES.md new file mode 100644 index 00000000..4bf09283 --- /dev/null +++ b/docs/sidb/PREREQUISITES.md @@ -0,0 +1,30 @@ +## Deployment Prerequisites +To deploy Oracle Single Instance Database in Kubernetes using the OraOperator, complete these steps. + +* ### Prepare Oracle Container Images + + You can either build Single Instance Database Container Images from the source, following the instructions at [https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance), or you can use the pre-built images available at [https://container-registry.oracle.com](https://container-registry.oracle.com) by signing in and accepting the required license agreement. + + Oracle Database Releases Supported: Enterprise and Standard Edition for Oracle Database 19c, and later releases. Express Edition for Oracle Database 21.3.0 only.
Oracle Database Free 23.2.0 and later Free releases + + Build Oracle REST Data Service Container Images from source following the instructions at [https://github.com/oracle/docker-images/tree/main/OracleRestDataServices](https://github.com/oracle/docker-images/tree/main/OracleRestDataServices). + The supported Oracle REST Data Service version is 21.4.2 + +* ### Ensure Sufficient Disk Space in Kubernetes Worker Nodes + + Provision Kubernetes worker nodes. Oracle recommends you provision them with 250 GB or more free disk space, which is required for pulling the base and patched database container images. If you are doing a Cloud deployment, then you can choose to increase the custom boot volume size of the worker nodes. + +* ### Set Up Kubernetes and Volumes for Database Persistence + + Set up an on-premises Kubernetes cluster, or subscribe to a managed Kubernetes service, such as Oracle Cloud Infrastructure Container Engine for Kubernetes. Use a dynamic volume provisioner or pre-provision static persistent volumes manually. These volumes are required for persistent storage of the database files. + + For more information about creating persistent volumes, see: [https://kubernetes.io/docs/concepts/storage/persistent-volumes/](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + +* ### Minikube Cluster Environment + + By default, when you create a cluster using the `minikube start` command, Minikube creates a node with 2GB RAM, 2 CPUs, and 20GB disk space. However, these resources (particularly disk space and RAM) may not be sufficient for running and managing Oracle Database using the OraOperator. For better performance, Oracle recommends that you configure the cluster to have a larger RAM and disk space than the Minikube default.
For example, the following command creates a Minikube cluster with 8GB RAM and 100GB disk space for the Minikube VM: + + ``` + minikube start --memory=8g --disk-size=100g + ``` + diff --git a/docs/sidb/README.md b/docs/sidb/README.md new file mode 100644 index 00000000..35f42f22 --- /dev/null +++ b/docs/sidb/README.md @@ -0,0 +1,1309 @@ +# Managing Oracle Single Instance Databases with Oracle Database Operator for Kubernetes + +Oracle Database Operator for Kubernetes (`OraOperator`) includes the Single Instance Database Controller, which enables provisioning, cloning, and patching of Oracle Single Instance Databases on Kubernetes. It also enables configuring the database for Oracle REST Data Services with Oracle APEX development platform. The following sections explain the setup and functionality of the operator + + * [Prerequisites](#prerequisites) + * [Mandatory Resource Privileges](#mandatory-resource-privileges) + * [Optional Resource Privileges](#optional-resource-privileges) + * [OpenShift Security Context Constraints](#openshift-security-context-constraints) + * [SingleInstanceDatabase Resource](#singleinstancedatabase-resource) + * [Create a Database](#create-a-database) + * [New Database](#new-database) + * [Pre-built Database](#pre-built-database) + * [XE Database](#xe-database) + * [Free Database](#free-database) + * [Free Lite Database](#free-lite-database) + * [Oracle True Cache](#oracle-true-cache) + * [Connecting to Database](#connecting-to-database) + * [Database Persistence (Storage) Configuration Options](#database-persistence-storage-configuration-options) + * [Dynamic Persistence](#dynamic-persistence) + * [Storage Expansion](#storage-expansion) + * [Static Persistence](#static-persistence) + * [Configuring a Database](#configuring-a-database) + * [Switching Database Modes](#switching-database-modes) + * [Changing Init Parameters](#changing-init-parameters) + * [Clone a Database](#clone-a-database) + * [Patch a Database](#patch-a-database) + * 
[Delete a Database](#delete-a-database) + * [Advanced Database Configurations](#advanced-database-configurations) + * [Run Database with Multiple Replicas](#run-database-with-multiple-replicas) + * [Database Pod Resource Management](#database-pod-resource-management) + * [Setup Database with LoadBalancer](#setup-database-with-loadbalancer) + * [Enabling TCPS Connections](#enabling-tcps-connections) + * [Specifying Custom Ports](#specifying-custom-ports) + * [Setup Data Guard Configuration for a Single Instance Database](#setup-data-guard-configuration-for-a-single-instance-database) + * [Create a Standby Database](#create-a-standby-database) + * [Create a Data Guard Configuration](#create-a-data-guard-configuration) + * [Perform a Switchover](#perform-a-switchover) + * [Enable Fast-Start Failover](#enable-fast-start-failover) + * [Convert Standby to Snapshot Standby](#convert-standby-to-snapshot-standby) + * [Static Data Guard Connect String](#static-data-guard-connect-string) + * [Patch Primary and Standby databases](#patch-primary-and-standby-databases) + * [Delete the Data Guard Configuration](#delete-the-data-guard-configuration) + * [Execute Custom Scripts](#execute-custom-scripts) + * [OracleRestDataService Resource](#oraclerestdataservice-resource) + * [REST Enable a Database](#rest-enable-a-database) + * [Provision ORDS](#provision-ords) + * [Database API](#database-api) + * [MongoDB API](#mongodb-api) + * [Advanced Usages](#advanced-usages) + * [Oracle Data Pump](#oracle-data-pump) + * [REST Enabled SQL](#rest-enabled-sql) + * [Database Actions](#database-actions) + * [APEX Installation](#apex-installation) + * [Delete ORDS](#delete-ords) + * [Maintenance Operations](#maintenance-operations) + * [Additional Information](#additional-information) + + +## Prerequisites + +Oracle strongly recommends that you comply with the [prerequisites](./PREREQUISITES.md) and the following requirements + + ### Mandatory Resource Privileges + + Single Instance 
Database(sidb) controller mandatorily requires the following Kubernetes resource privileges: + + | Resources | Privileges | + | --- | --- | + | Pods | create delete get list patch update watch | + | Containers | create delete get list patch update watch | + | PersistentVolumeClaims | create delete get list patch update watch | + | Services | create delete get list patch update watch | + | Secrets | create delete get list patch update watch | + | Events | create patch | + + For managing the required levels of access, configure [role binding](../../README.md#create-role-bindings-for-access-management) + + ### Optional Resource Privileges + + Single Instance Database(`sidb`) controller optionally requires the following Kubernetes resource privileges, depending on the functionality being used: + + | Functionality | Resources | Privileges | + | --- | --- | --- | + | NodePort Services | Nodes | list watch | + | Storage Expansion with block volumes | StorageClasses | get list watch | + | Custom Scripts Execution | PersistentVolumes | get list watch | + + + For exposing the database using Nodeport services, apply [RBAC](../../rbac/node-rbac.yaml) + ```sh + kubectl apply -f rbac/node-rbac.yaml + ``` + For automatic storage expansion of block volumes, apply [RBAC](../../rbac/storage-class-rbac.yaml) + ```sh + kubectl apply -f rbac/storage-class-rbac.yaml + ``` + For automatic execution of custom scripts after database setup or startup, apply [RBAC](../../rbac/persistent-volume-rbac.yaml) + ```sh + kubectl apply -f rbac/persistent-volume-rbac.yaml + ``` + + ### OpenShift Security Context Constraints + + OpenShift requires additional Security Context Constraints (SCC) for deploying and managing the `SingleInstanceDatabase` resource. To create the appropriate SCCs before deploying the `SingleInstanceDatabase` resource, complete these steps: + + 1. 
Create a new project/namespace for deploying the `SingleInstanceDatabase` resource + + ```sh + oc new-project sidb-ns + ``` + + **Note:** OpenShift recommends that you should not deploy in namespaces starting with `kube`, `openshift` and the `default` namespace. + + 2. Apply the file [openshift_rbac.yaml](../../config/samples/sidb/openshift_rbac.yaml) with cluster-admin user privileges. + + ```sh + oc apply -f openshift_rbac.yaml + ``` + + Running this example procedure results in creation of SCC (Security Context Constraints) and serviceaccount `sidb-sa` in the namespace `sidb-ns`, which has access to the SCC. + + **Note:** This configuration yaml file example binds the SCC to the serviceaccount `sidb-sa` in namespace `sidb-ns`. For any other project/namespace, you must update the file appropriately with the namespace before applying this example. + + 3. Set the `serviceAccountName` attribute to `sidb-sa` and the namespace to `sidb-ns` in **[config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)** before deploying the SingleInstanceDatabase resource. + +## SingleInstanceDatabase Resource + +The Oracle Database Operator creates the `SingleInstanceDatabase` as a custom resource. Doing this enables Oracle Database to be managed as a native Kubernetes object. In this document, we will refer to the `SingleInstanceDatabase` resource as the database.
+ +### Resource Details + +#### Database List +To list databases, use the following command as an example, where the database names are `sidb-sample` and `sidb-sample-clone`, which are the names we will use as database names in command examples: + +```sh +$ kubectl get singleinstancedatabases -o name + + singleinstancedatabase.database.oracle.com/sidb-sample + singleinstancedatabase.database.oracle.com/sidb-sample-clone + +``` + +#### Quick Status +To obtain a quick database status, use the following command as an example: + +```sh +$ kubectl get singleinstancedatabase sidb-sample + +NAME EDITION STATUS VERSION CONNECT STR TCPS CONNECT STR OEM EXPRESS URL +sidb-sample Enterprise Healthy 19.3.0.0.0 10.0.25.54:1521/ORCL1 Unavailable https://10.0.25.54:5500/em +``` + +#### Detailed Status +To obtain a detailed database status, use the following command as an example: + +```sh +$ kubectl describe singleinstancedatabase sidb-sample-clone + + Name: sidb-sample-clone + Namespace: default + Labels: + Annotations: + API Version: database.oracle.com/v1alpha1 + Kind: SingleInstanceDatabase + Metadata: .... + Spec: .... 
+ Status: + Cluster Connect String: sidb-sample-clone.default:1521/ORCL1C + Conditions: + Last Transition Time: (YYYY-MM-DD)T(HH:MM:SS)Z + Message: Waiting for database to be ready + Observed Generation: 2 + Reason: LastReconcileCycleQueued + Status: True + Type: ReconcileQueued + Last Transition Time: 2021-06-30T11:07:56Z + Message: processing datapatch execution + Observed Generation: 3 + Reason: LastReconcileCycleBlocked + Status: True + Type: ReconcileBlocked + Last Transition Time: (YYYY-MM-DD)T(HH:MM:SS)Z + Message: no reconcile errors + Observed Generation: 3 + Reason: LastReconcileCycleCompleted + Status: True + Type: ReconcileComplete + Connect String: 10.0.25.58:1521/ORCL1C + Datafiles Created: true + Datafiles Patched: true + Edition: Enterprise + Flash Back: true + Force Log: false + Oem Express URL: https://10.0.25.58:5500/em + Pdb Name: orclpdb1 + Release Update: 19.11.0.0.0 + Replicas: 2 + Role: PRIMARY + Sid: ORCL1C + Status: Healthy + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Database Pending 35m (x2 over 35m) SingleInstanceDatabase waiting for database pod to be ready + Normal Database Creating 27m (x24 over 34m) SingleInstanceDatabase waiting for database to be ready + Normal Database Ready 22m SingleInstanceDatabase database open on pod sidb-sample-clone-133ol scheduled on node 10.0.10.6 + Normal Datapatch Pending 21m SingleInstanceDatabase datapatch execution pending + Normal Datapatch Executing 20m SingleInstanceDatabase datapatch begin execution + Normal Datapatch Done 8s SingleInstanceDatabase datafiles patched from 19.3.0.0.0 to 19.11.0.0.0 : SUCCESS + +``` + +### Template YAML + +The template `.yaml` file for Single Instance Database (Enterprise and Standard Editions), including all the configurable options, is available at: +**[`config/samples/sidb/singleinstancedatabase.yaml`](./../../config/samples/sidb/singleinstancedatabase.yaml)** + +**Note:** +The `adminPassword` field in the above 
`singleinstancedatabase.yaml`example file refers to a Secret for the SYS, SYSTEM and PDBADMIN users of the Single Instance Database. This Secret is required when you provision a new database, or when you clone an existing database. + +Create this Secret using the following command as an example: + + kubectl create secret generic db-admin-secret --from-literal=oracle_pwd= + +This command creates a Secret named `db-admin-secret`, with the key `oracle_pwd` mapped to the actual password specified in the command. + +### Create a Database + +#### New Database + +To provision a new database instance on the Kubernetes cluster, use the example **[`config/samples/sidb/singleinstancedatabase_create.yaml`](../../config/samples/sidb/singleinstancedatabase_create.yaml)**. + +1. Log into [Oracle Container Registry](https://container-registry.oracle.com/) and accept the license agreement for the Database image; ignore if you have accepted the license agreement already. + +2. If you have not already done so, create an image pull secret for the Oracle Container Registry: + + ```sh + $ kubectl create secret docker-registry oracle-container-registry-secret --docker-server=container-registry.oracle.com --docker-username='' --docker-password='' --docker-email='' + + secret/oracle-container-registry-secret created + ``` + Note: Generate the auth token from user profile section on top right of the page after logging into container-registry.oracle.com + + This secret can also be created from the docker config.json or from podman auth.json after a successful login + ```sh + docker login container-registry.oracle.com + kubectl create secret generic oracle-container-registry-secret --from-file=.dockerconfigjson=.docker/config.json --type=kubernetes.io/dockerconfigjson + ``` + or + ```sh + podman login container-registry.oracle.com + kubectl create secret generic oracle-container-registry-secret --from-file=.dockerconfigjson=${XDG_RUNTIME_DIR}/containers/auth.json 
--type=kubernetes.io/dockerconfigjson + ``` +3. Provision a new database instance on the cluster by using the following command: + + ```sh + $ kubectl apply -f singleinstancedatabase_create.yaml + + singleinstancedatabase.database.oracle.com/sidb-sample created + ``` + +**Note:** +- For ease of use, the storage class **oci-bv** is specified in the **[`singleinstancedatabase_create.yaml`](../../config/samples/sidb/singleinstancedatabase_create.yaml)**. This storage class facilitates dynamic provisioning of the OCI block volumes on the Oracle OKE for persistent storage of the database. The supported access mode for this class is `ReadWriteOnce`. For other cloud providers, you can similarly use their dynamic provisioning storage classes. +- It is beneficial to have the database replica pods more than or equal to the number of available nodes if `ReadWriteMany` access mode is used with the OCI NFS volume. By doing so, the pods get distributed on different nodes and the database image is downloaded on all those nodes. This helps in reducing time for the database fail-over if the active database pod dies. +- Supports Oracle Database Enterprise Edition (19.3.0), and later releases. +- To pull the database image faster from the container registry, so that you can bring up the SIDB instance quickly, you can use the `container-registry mirror` of the corresponding cluster's region. For example, if the cluster exists in Mumbai region, then you can use the `container-registry-bom.oracle.com` mirror. For more information on container-registry mirrors, see: [https://blogs.oracle.com/wim/post/oracle-container-registry-mirrors-in-oracle-cloud-infrastructure](https://blogs.oracle.com/wim/post/oracle-container-registry-mirrors-in-oracle-cloud-infrastructure). +- To update the initialization (init) parameters, such as `sgaTarget` and `pgaAggregateTarget`, see the `initParams` section of the [`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file. 
+ +#### Pre-built Database + +To provision a new pre-built database instance, use the sample **[`config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml)** file. For example: +```sh +$ kubectl apply -f singleinstancedatabase_prebuiltdb.yaml + + singleinstancedatabase.database.oracle.com/prebuiltdb-sample created +``` + +This pre-built image includes the data files of the database inside the image itself. As a result, the database startup time of the container is reduced, down to a couple of seconds. The pre-built database image can be very useful in continuous integration/continuous delivery (CI/CD) scenarios, in which databases are used for conducting tests or experiments, and the workflow is simple. + +To build the pre-built database image for the Enterprise/Standard edition, follow these instructions: [Pre-built Database (prebuiltdb) Extension](https://github.com/oracle/docker-images/blob/main/OracleDatabase/SingleInstance/extensions/prebuiltdb/README.md). + +#### XE Database +To provision a new Oracle Database Express Edition (XE) database, use the sample **[config/samples/sidb/singleinstancedatabase_express.yaml](../../config/samples/sidb/singleinstancedatabase_express.yaml)** file. For example: + + kubectl apply -f singleinstancedatabase_express.yaml + +This command pulls the XE image available in [Oracle Container Registry](https://container-registry.oracle.com/). + +**Note:** +- Provisioning Oracle Database Express Edition is supported for release 21c (21.3.0) only. Oracle Database Free replaces Oracle Database Express Edition. +- For XE database, only single replica mode (i.e. `replicas: 1`) is supported. +- For XE database, you **cannot change** the init parameters, such as `cpuCount, processes, sgaTarget or pgaAggregateTarget`. 
+ +#### Free Database +To provision new Oracle Database Free, use the sample **[config/samples/sidb/singleinstancedatabase_free.yaml](../../config/samples/sidb/singleinstancedatabase_free.yaml)** file. For example: + + kubectl apply -f singleinstancedatabase_free.yaml + +This command pulls the Free image available in [Oracle Container Registry](https://container-registry.oracle.com/). + +#### Free Lite Database +To provision new Oracle Database Free Lite, use the sample **[config/samples/sidb/singleinstancedatabase_free-lite.yaml](../../config/samples/sidb/singleinstancedatabase_free-lite.yaml)** file. For example: + + kubectl apply -f singleinstancedatabase_free-lite.yaml + +This command pulls the Free lite image available in [Oracle Container Registry](https://container-registry.oracle.com/). + +**Note:** +- Provisioning Oracle Database Free is supported for release 23.3.0 and later releases. +- For Free database, only single replica mode (such as `replicas: 1`) is supported. +- For Free database, you **cannot change** the init parameters. These include parameters such as `cpuCount, processes, sgaTarget or pgaAggregateTarget`. +- Oracle Enterprise Manager Express (OEM Express) is not supported in release 23.3.0 and later releases. + +#### Oracle True Cache +Oracle True Cache is an in-memory, consistent, and automatically managed cache for Oracle Database. +To provision a True Cache instance for Oracle Free Database in Kubernetes, use the sample **[`config/samples/sidb/singleinstancedatabase_free-truecache.yaml`](../../config/samples/sidb/singleinstancedatabase_free-truecache.yaml)** file. For example + + kubectl apply -f singleinstancedatabase_free-truecache.yaml + +#### Additional Information +You are required to specify the database administrative user (admin) password Secret in the corresponding YAML file. 
The default values mentioned in the `adminPassword.secretName` fields of [`singleinstancedatabase_create.yaml`](../../config/samples/sidb/singleinstancedatabase_create.yaml), [`singleinstancedatabase_prebuiltdb.yaml`](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml), [`singleinstancedatabase_express.yaml`](../../config/samples/sidb/singleinstancedatabase_express.yaml) and [`singleinstancedatabse_free.yaml`](../../config/samples/sidb/singleinstancedatabase_free.yaml) files are `db-admin-secret`, `prebuiltdb-admin-secret`, `xedb-admin-secret` and `free-admin-secret` respectively. You can create these Secrets manually by using the sample command mentioned in the [`Template YAML`](#template-yaml) section. Alternatively, you can create these Secrets by filling in the passwords in the **[`singleinstancedatabase_secrets.yaml`](../../config/samples/sidb/singleinstancedatabase_secrets.yaml)** file and applying them using the following command: + +```bash +kubectl apply -f singleinstancedatabase_secrets.yaml +``` + +### Connecting to Database + +Creating a new database instance takes a while. When the `status` column returns the response `Healthy`, the database is open for connections. + +```sh +$ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.status}" + + Healthy +``` + +Clients can obtain the connect string to the CDB from `.status.connectString`, and the connect string to the PDB from `.status.pdbConnectString`. For example: + +```sh +$ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.connectString}" + + 10.0.25.54:1521/ORCL +``` +```sh +$ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.pdbConnectString}" + + 10.0.25.54:1521/ORCLPDB +``` + +To connect to the database using the connect strings returned by the commands above, you can use any supported client, or use SQLPlus. 
For example: +```sh +$ sqlplus sys/<.spec.adminPassword>@10.0.25.54:1521/ORCL as sysdba + +SQL*Plus: Release 19.0.0.0.0 - Production on Wed May 4 16:00:49 2022 +Version 19.14.0.0.0 + +Copyright (c) 1982, 2021, Oracle. All rights reserved. + + +Connected to: +Oracle Database 21c Express Edition Release 21.0.0.0.0 - Production +Version 21.3.0.0.0 + +SQL> +``` +**Note:** The `<.spec.adminPassword>` above refers to the database password for SYS, SYSTEM and PDBADMIN users, which in turn is represented by `spec` section's `adminPassword` field of the **[config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)** file. + +The Oracle Database inside the container also has Oracle Enterprise Manager Express (OEM Express) as a basic observability console. To access OEM Express, start the browser, and paste in a URL similar to the following example: + +```sh +$ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.oemExpressUrl}" + + https://10.0.25.54:5500/em +``` +**Note:** OEM Express is not available for 23.3.0 and later releases + +### Database Persistence (Storage) Configuration Options +You can configure database persistence in the following two ways: +- Dynamic Persistence Provisioning +- Static Persistence Provisioning + +#### Dynamic Persistence +In **Dynamic Persistence Provisioning**, a persistent volume is provisioned by mentioning a storage class. For example, **oci-bv** storage class is specified in the **[singleinstancedatabase_create.yaml](../../config/samples/sidb/singleinstancedatabase_create.yaml)** file. This storage class facilitates dynamic provisioning of the OCI block volumes. The supported access mode for this class is `ReadWriteOnce`. For other cloud providers, you can similarly use their dynamic provisioning storage classes. + +**Note:** +- Generally, the `Reclaim Policy` of such dynamically provisioned volumes is `Delete`.
These volumes are deleted when their corresponding database deployment is deleted. To retain volumes, use static provisioning, as explained in the Block Volume Static Provisioning section. +- In **Minikube**, the dynamic persistence provisioning class is **standard**. + +#### Storage Expansion +When using dynamic persistence, you can at any time scale up your persistent volumes by simply patching the singleinstancedatabase resource using the following command : +```sh +$ kubectl patch singleinstancedatabase sidb-sample -p '{"spec":{"persistence":{"size":"100Gi"}}}' --type=merge +``` + +**Note:** +- Storage expansion requires the storage class to be configured with `allowVolumeExpansion:true` +- Storage expansion requires read and watch access for storage account as mentioned in [prerequisites](#prerequisites) +- User can only scale up a volume/storage and not scale down + +#### Static Persistence +In **Static Persistence Provisioning**, you must create a volume manually, and then use the name of this volume with the `<.spec.persistence.datafilesVolumeName>` field, which corresponds to the `datafilesVolumeName` field of the persistence section in the **[`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml)**. The `Reclaim Policy` of such volumes can be set to `Retain`. When this policy is set, the volume is not deleted when its corresponding deployment is deleted. +For example in **Minikube**, a persistent volume can be provisioned using the following yaml file example: +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: db-vol +spec: + capacity: + storage: 10Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + hostPath: + path: /data/oradata +``` +The persistent volume name (in this case, `db-vol`) can be mentioned in the `datafilesVolumeName` field of the **[`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml)**. 
`storageClass` field is not required in this case, and can be left empty. + +Static Persistence Provisioning in Oracle Cloud Infrastructure (OCI) is explained in the following subsections: + +##### OCI Block Volume Static Provisioning +With block volume static provisioning, you must manually create a block volume resource from the OCI console, and fetch its `OCID`. To create the persistent volume, you can use the following YAML file: +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: block-vol +spec: + capacity: + storage: 1024Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + csi: + driver: blockvolume.csi.oraclecloud.com + volumeHandle: +``` + +**Note:** OCI block volumes are AD (Availability Domain) specific. Ensure that the database is deployed in the same AD as that of its statically provisioned block volume. In dynamic provisioning, this is done automatically. +To provision the database in a specific AD, uncomment the following line from the **[singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)** file: + +```yaml +nodeSelector: + topology.kubernetes.io/zone: PHX-AD-1 +``` + +##### OCI NFS Volume Static Provisioning +Similar to the block volume static provisioning, you have to manually create a file system resource from the OCI console, and fetch its `OCID, Mount Target IP Address and Export Path`. 
Mention these values in the following YAML file to create the persistent volume: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-vol +spec: + capacity: + storage: 1024Gi + volumeMode: Filesystem + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + csi: + driver: fss.csi.oraclecloud.com + volumeHandle: "::/" +``` + +**Note:** +- Example volumeHandle in the above config file : + + `volumeHandle: "ocid1.filesystem.oc1.eu_frankfurt_1.aaaaaqe3bj...eaaa:10.0.10.156:/FileSystem-20220713-1036-02"` + +- Whenever a mount target is provisioned in OCI, its `Reported Size (GiB)` values are very large. This is visible on the mount target page when logged in to the OCI console. Some applications will fail to install if the results of a space requirements check show too much available disk space. So in the OCI Console, click the little "Pencil" icon besides the **Reported Size** parameter of the Mount Target to specify, in gigabytes (GiB), the maximum capacity reported by file systems exported through this mount target. This setting does not limit the actual amount of data you can store. + +- You must open the required ports to access the NFS volume from the K8S cluster. Add the required ports to the security list of the subnet to which your K8S nodes are connected. For more information, see **[Security Lists File Storage](https://docs.oracle.com/en-us/iaas/Content/File/Tasks/securitylistsfilestorage.htm)** for the details. + +### Configuring a Database +The `OraOperator` facilitates you to configure the database. Various database configuration options are explained in the following subsections: + +#### Switching Database Modes +The following database modes can be updated after the database is created: + +- `flashBack` +- `archiveLog` +- `forceLog` + +To change these modes, change their attribute values, and apply the change by using the +`kubectl apply` or `kubectl edit/patch` commands. 
+ +**Caution**: Enable `archiveLog` mode before setting `flashBack` to `ON`, and set `flashBack` to `OFF` before disabling `archiveLog` mode. + +For example: + +```sh +$ kubectl patch singleinstancedatabase sidb-sample --type merge -p '{"spec":{"forceLog": true}}' + + singleinstancedatabase.database.oracle.com/sidb-sample patched +``` +Check the Database Config Status by using the following command: + +```sh +$ kubectl get singleinstancedatabase sidb-sample -o "jsonpath=[{.status.archiveLog}, {.status.flashBack}, {.status.forceLog}]" + + [true, true, true] +``` + +#### Changing Init Parameters + +The following database initialization parameters can be updated after the database is created: + +- sgaTarget +- pgaAggregateTarget +- cpuCount +- processes. + +Change their attribute values and apply using `kubectl apply` or `kubectl edit/patch` commands. + +**Note:** +The value for the initialization parameter `sgaTarget` that you provide should be within the range set by [sga_min_size, sga_max_size]. If the value you provide is not in that range, then `sga_target` is not updated to the value you specify for `sgaTarget`. + +#### Immutable YAML Attributes + +The following attributes cannot be modified after creating the Single Instance Database instance: + +- `sid` +- `edition` +- `charset` +- `pdbName` +- `primaryDatabaseRef` + +If you attempt to change one of these attributes, then you receive an error similar to the following: + +```sh +$ kubectl --type=merge -p '{"spec":{"sid":"ORCL1"}}' patch singleinstancedatabase sidb-sample + + The SingleInstanceDatabase "sidb-sample" is invalid: spec.sid: Forbidden: cannot be changed +``` + +### Clone a Database + +To create copies of your existing database quickly, you can use the cloning functionality. A cloned database is an exact, block-for-block copy of the source database. Cloning is much faster than creating a fresh database and copying over the data. 
+ +To quickly clone the existing database `sidb-sample` we previously created for this document, use the sample **[`config/samples/sidb/singleinstancedatabase_clone.yaml`](../../config/samples/sidb/singleinstancedatabase_clone.yaml)** file. + +For example: + +```sh + +$ kubectl apply -f singleinstancedatabase_clone.yaml + + singleinstancedatabase.database.oracle.com/sidb-sample-clone created +``` + +**Note:** +- To clone a database, the source database must have archiveLog mode set to true. +- The clone database can specify a database image that is different from the source database. In such cases, cloning is supported only between databases of the same major release. +- Only enterprise and standard editions support cloning. + +### Patch a Database + +Databases running in your cluster and managed by the Oracle Database operator can be patched or rolled back between release updates of the same major release. To patch databases, specify an image of the higher release update. To roll back databases, specify an image of the lower release update. + +Patched Oracle Docker images can be built by using this [patching extension](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/patching). + +#### Patch + +To patch an existing database, edit and apply the **[`config/samples/sidb/singleinstancedatabase_patch.yaml`](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file of the database resource/object either by specifying a new release update for image attributes, or by running the following command: + +```sh +kubectl --type=merge -p '{"spec":{"image":{"pullFrom":"patched-image:tag","pullSecrets":"pull-secret"}}}' patch singleinstancedatabase sidb-sample + +singleinstancedatabase.database.oracle.com/sidb-sample patched + +``` + +After patching is complete, the database pods are restarted with the new release update image. + +**Note:** +- Only Enterprise and Standard Editions support patching. 
+ +#### Patch after Cloning + +To clone and patch the database at the same time, clone your source database by using the [cloning existing database](#clone-existing-database) method, and specify a new release image for the cloned database. Use this method to ensure there are no patching related issues impacting your database performance or functionality. + +#### Datapatch Status + +Patching/Rollback operations are complete when the datapatch tool completes patching or rollback of the data files. Check the data files patching status +and current release update version using the following commands + +```sh +$ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.datafilesPatched}" + + true + +$ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.releaseUpdate}" + + 19.3.0.0.0 +``` + +#### Rollback +You can roll back to a prior database version by specifying the old image in the `image` field of the **[`config/samples/sidb/singleinstancedatabase_patch.yaml`](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file, and applying it using the following command: + +```bash +kubectl apply -f singleinstancedatabase_patch.yaml +``` + +This can also be done using the following command: + +```sh +kubectl --type=merge -p '{"spec":{"image":{"pullFrom":"old-image:tag","pullSecrets":"pull-secret"}}}' patch singleinstancedatabase sidb-sample + +singleinstancedatabase.database.oracle.com/sidb-sample patched + +``` + +### Delete a Database +To delete the database, run the following command : + +```bash +kubectl delete singleinstancedatabase.database.oracle.com sidb-sample +``` +This command will delete the database pods and associated service. + +### Advanced Database Configurations +Some advanced database configuration scenarios are as follows: + +#### Run Database with Multiple Replicas +In multiple replicas mode, more than one pod is created for the database. 
Setting the replica count equal to or more than the number of worker nodes helps in distributing the replicas across all the nodes that have access to the database persistent storage volume. +The database is open and mounted by one of the replica pods. Other replica pods have the database instance started but not mounted, and serve to provide a quick cold fail-over in case the active pod goes down. + +To enable multiple replicas, update the replica attribute in the `.yaml`, and apply by using the `kubectl apply` or `kubectl scale` commands. + +The following table depicts the fail-over matrix for any destructive operation to the primary replica pod: + +| Pod Destructive Operation | Pod Restart/FailOver| + | --- | --- | + | Database instance crash | Yes | + | Force delete pod with zero grace period | Yes | + | Gracefully delete pod | Yes | + | Node running primary replica dies | Yes | + | Direct shutdown [All modes] | Yes | + | Maintenance shutdown [All modes] | No | + | PDB close | No | + +**Note:** +- Maintenance shutdown/startup can be run by using the scripts `/home/oracle/shutDown.sh` and `/home/oracle/startUp.sh` +- This functionality requires the [k8s extension](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/k8s) extended images. The database image from the container registry `container-registry.oracle.com` includes the K8s extension. +- Because Oracle Database Express Edition (XE) does not support [k8s extension](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/k8s), it does not support multiple replicas. +- If the `ReadWriteOnce` access mode is used, then all the replicas will be scheduled on the same node where the persistent volume would be mounted. +- If the `ReadWriteMany` access mode is used, then all the replicas will be distributed on different nodes. 
For this reason, Oracle recommends that you have replicas more than or equal to the number of the nodes, because the database image is downloaded on all those nodes. This is beneficial in quick cold fail-over scenario (when the active pod dies) as the image would already be available on that node. + +#### Database Pod Resource Management +When creating a Single Instance Database, you can specify the CPU and memory resources needed by the database pod. These specified resources are passed to the `kube-scheduler` so that the pod is scheduled on one of the nodes that has the required resources available. To use database pod resource management, specify values for the `resources` attributes in the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file, and apply it. + +#### Setup Database with LoadBalancer +For the Single Instance Database, the default service is the `NodePort` service. You can enable the `LoadBalancer` service by using the `kubectl patch` command. 
+ +For example: + +```sh +$ kubectl --type=merge -p '{"spec":{"loadBalancer": true}}' patch singleinstancedatabase sidb-sample + + singleinstancedatabase.database.oracle.com/sidb-sample patched +``` + +### Enabling TCPS Connections +You can enable TCPS connections in the database by setting the `enableTCPS` field to `true` in the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file, and applying it. + +Alternatively, you can use the following command: +```bash +kubectl patch --type=merge singleinstancedatabases.database.oracle.com sidb-sample -p '{"spec": {"enableTCPS": true}}' +``` +By default self signed certs are used for TCPS connections. The TCPS connections status can also be queried by the following command: +```bash +kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.isTcpsEnabled}" +true +``` + +**With Self Signed Certs** +- When TCPS is enabled, a self-signed certificate is generated and stored in wallets. For users' convenience, a client-side wallet is generated in location `/opt/oracle/oradata/clientWallet/$ORACLE_SID` in the pod. +- The self-signed certificate used with TCPS has validity for 1 year. After the certificate is expired, it will be renewed by the `OraOperator` automatically. Download the wallet again after auto-renewal. +- You can set the certificate renew interval with the help of `tcpsCertRenewInterval` field in the **[config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)** file. The minimum accepted value is 24h, and the maximum value is 8760h (1 year). The certificates used with TCPS will automatically be renewed after this interval. If this field is omitted/commented in the yaml file, the certificates will not be renewed automatically. +- When the certificate gets created/renewed, the `.status.certCreationTimestamp` status variable gets updated accordingly. 
You can see this timestamp by using the following command: + ```bash + kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.certCreationTimestamp}" + ``` + +**With User Provided Certs** +- Users can provide custom certs to be used for TCPS connections instead of self signed ones. +- Specify the certs by creating a Kubernetes tls secret resource using following command: + ```bash + kubectl create secret tls my-tls-secret --cert=path/to/cert/tls.crt --key=path/to/key/tls.key + ``` +- `tls.crt` is a certificate chain in the order of client, followed by intermediate and then root certificate and `tls.key` is client key. +- Specify the secret created above (`my-tls-secret`) as the value for the attribute `tcpsTlsSecret` in the [config/samples/sidb/singleinstancedatabase_tcps.yaml](../../config/samples/sidb/singleinstancedatabase_tcps.yaml) file, and apply it. + +**Connecting to the Database using TCPS** +- Download the wallet from the Persistent Volume (PV) attached with the database pod. The location of the wallet inside the pod is as `/opt/oracle/oradata/clientWallet/$ORACLE_SID`. Let us assume the `ORACLE_SID` is `ORCL1`, and singleinstance database resource name is `sidb-sample` for the upcoming example command. You can copy the wallet to the destination directory by the following command: + ```bash + kubectl cp $(kubectl get pods -l app=sidb-sample -o=jsonpath='{.items[0].metadata.name}'):/opt/oracle/oradata/clientWallet/ORCL1 + ``` +- This wallet includes the sample `tnsnames.ora` and `sqlnet.ora` files. All the TNS entries for the database (corresponding to the CDB and PDB) reside in the `tnsnames.ora` file. 
Switch to the downloaded wallet directory and set the `TNS_ADMIN` environment variable to point to the current directory as follows: + ```bash + cd <wallet-directory> + export TNS_ADMIN=$(pwd) + ``` + After this, connect with SQL*Plus, using the following example commands: + ```bash + sqlplus sys@ORCL1 as sysdba + ``` + +### Specifying Custom Ports +As mentioned in the section [Setup Database with LoadBalancer](#setup-database-with-loadbalancer), there are two Kubernetes services possible for the database: NodePort and LoadBalancer. You can specify which port to use with these services by editing the `listenerPort` and `tcpsListenerPort` fields of the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file. + +`listenerPort` is intended for normal database connections. Similarly, `tcpsListenerPort` is intended for TCPS database connections. + +If the `LoadBalancer` is enabled, then the `listenerPort`, and `tcpsListenerPort` will be the opened ports on the Load Balancer for normal and TCPS database connections respectively. When the `LoadBalancer` is enabled, the default values of `listenerPort` and `tcpsListenerPort` are 1521 and 2484. + +If the `NodePort` service is enabled, then the `listenerPort`, and `tcpsListenerPort` will be the opened ports on the Kubernetes nodes for normal and TCPS database connections respectively. In this case, the allowed range for the `listenerPort`, and `tcpsListenerPort` is 30000-32767. + +**Note:** +- `listenerPort` and `tcpsListenerPort` cannot have the same values. +- `tcpsListenerPort` will come into effect only when TCPS connections are enabled (specifically, the `enableTCPS` field is set in [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file). 
+ +- If TCPS connections are enabled, and `listenerPort` is commented or removed in the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file, then only the TCPS endpoint will be exposed. +- If LoadBalancer is enabled, and either `listenerPort` or `tcpsListenerPort` is changed, then it takes some time to complete the work requests (drain existing backend sets and create new ones). During this time, the database connectivity is broken, although `SingleInstanceDatabase` and `LoadBalancer` remain in a healthy state. To check the progress of the work requests, you can log in to the Cloud provider's console and check the corresponding LoadBalancer. + +### Setup Data Guard Configuration for a Single Instance Database + +### Create a Standby Database + +#### Prerequisites +- Before creating a Standby, ensure that ArchiveLog, FlashBack, and ForceLog on the primary Single Instance Database(`.spec.primaryDatabaseRef`) are turned on. +- Standby database is not supported for TCPS-enabled Primary databases. + +#### Template YAML +To create a standby database, edit and apply the example YAML file [`config/samples/sidb/singleinstancedatabase_standby.yaml`](../../config/samples/sidb/singleinstancedatabase_standby.yaml). + +**Note:** +- The `adminPassword` field of the above [`config/samples/sidb/singleinstancedatabase_standby.yaml`](../../config/samples/sidb/singleinstancedatabase_standby.yaml) contains an admin password Secret of the primary database referred to for Standby Database creation. By default `keepSecret` is set to `true`, which means that the secret is saved. However, if you want the Secret to be deleted after the database pod becomes ready, then set the `keepSecret` attribute of the `adminPassword` field to `false`. +- Specify the primary database with which the standby database is associated in the `.spec.primaryDatabaseRef` field of the yaml file. 
+ +- The `.spec.createAs` field of the yaml file should be set to "standby". +- Database configurations, such as `Archivelog`, `FlashBack`, `ForceLog`, and `TCPS connections`, are not supported for the standby database. + +#### List Standby Databases +To list the standby databases, use the `get singleinstancedatabase` command. For example: + +```sh +kubectl get singleinstancedatabase + +NAME EDITION STATUS ROLE VERSION CONNECT STR TCPS CONNECT STR OEM EXPRESS URL +sidb-19 Enterprise Healthy PRIMARY 19.3.0.0.0 10.25.0.26:1521/ORCL1 Unavailable https://10.25.0.26:5500/em +stdby-1 Enterprise Healthy PHYSICAL_STANDBY 19.3.0.0.0 10.25.0.27:32392/ORCLS1 Unavailable https://10.25.0.27:30329/em + +``` + +#### Query Primary Database Reference +You can query the corresponding primary database for every standby database. For example: + +```sh +kubectl get singleinstancedatabase stdby-1 -o "jsonpath={.status.primaryDatabase}" +sidb-19 +``` + +#### Creation Status + + Creating a new standby database instance takes a while. When the `status` field returns the response "Healthy", the database is open for connections. 
For example: + + ```sh +$ kubectl get singleinstancedatabase stdby-1 -o "jsonpath={.status.status}" + + Healthy +``` + +### Create a Data Guard Configuration + +#### Template YAML + +After creating standbys, set up an Oracle Data Guard (Data Guard) configuration with protection mode, and switch over capability using the following example YAML: +[`config/samples/sidb/dataguardbroker.yaml`](./../../config/samples/sidb/dataguardbroker.yaml) + +#### Create DataGuardBroker Resource + +To use the Data Guard broker, provision a new `dataguardbroker` custom resource for a single instance database(`.spec.primaryDatabaseRef`) by specifying the appropriate values for the primary and standby databases in the example `.yaml` file, and running the following command: + +```sh +$ kubectl create -f dataguardbroker.yaml + + dataguardbroker.database.oracle.com/dataguardbroker-sample created +``` +**Note:** The following attributes cannot be patched after you create the `dataguardbroker` resource: `primaryDatabaseRef, protectionMode` + +#### DataguardBroker List + +To list the Data Guard broker resources, use the following command: + +```sh + $ kubectl get dataguardbroker -o name + + dataguardbroker.database.oracle.com/dataguardbroker-sample + +``` + +#### Quick Status +You can obtain a quick status of Data Guard broker by using the following command: + +```sh + $ kubectl get dataguardbroker dataguardbroker-sample + + NAME PRIMARY STANDBYS PROTECTION MODE CONNECT STR STATUS + dataguardbroker-sample ORCL ORCLS1,ORCLS2 MaxAvailability 10.0.25.85:31555/DATAGUARD Healthy + +``` + +#### Detailed Status +To obtain more detailed Data Guard broker status, use this command: + +```sh + $ kubectl describe dataguardbroker dataguardbroker-sample + + Name: dataguardbroker-sample + Namespace: default + Labels: + Annotations: + API Version: database.oracle.com/v1alpha1 + Kind: DataguardBroker + Metadata: + Creation Timestamp: 2023-01-23T04:29:04Z + Finalizers: + 
database.oracle.com/dataguardbrokerfinalizer + Generation: 3 + Managed Fields: + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + ... + Manager: manager + Operation: Update + Time: 2023-01-23T04:30:20Z + API Version: database.oracle.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + ... + Manager: kubectl-client-side-apply + Operation: Update + Time: 2023-01-23T04:44:40Z + Resource Version: 75178376 + UID: c04a3d88-2018-4f7f-b232-b74d6c3d9479 + Spec: + Admin Password: + Keep Secret: true + Secret Key: oracle_pwd + Secret Name: db-secret + Fast Start Failover: false + Primary Database Ref: sidb-sample + Protection Mode: MaxAvailability + Set As Primary Database: + Standby Database Refs: + standby-sample-1 + standby-sample-2 + Status: + Cluster Connect String: dataguardbroker-sample.default:1521/DATAGUARD + External Connect String: 10.0.25.85:31167/DATAGUARD + Fast Start Failover: false + Primary Database: OR19E3 + Standby Databases: OR19E3S1,OR19E3S2 + Status: Healthy + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SUCCESS 42m DataguardBroker + Normal DG Configuration up to date 24m (x13 over 56m) DataguardBroker +``` + +### Perform a Switchover + +Specify the approppriate database system identifier (SID) (the SID of one of `.spec.primaryDatabaseRef` , `.spec.standbyDatabaseRefs[]`) to be set primary in the `.spec.setAsPrimaryDatabase` of [`dataguardbroker.yaml`](./../../config/samples/sidb/dataguardbroker.yaml) and apply the yaml file. + +When you apply the YAML file, the database you specify will be set to primary. 
However, if the database specified with the `apply` command is already the primary, then this command has no effect: + +```sh +$ kubectl apply -f dataguardbroker.yaml + + dataguardbroker.database.oracle.com/dataguardbroker-sample apply + +``` +You can also use the patch command: + +```sh +$ kubectl --type=merge -p '{"spec":{"setAsPrimaryDatabase":"ORCLS1"}}' patch dataguardbroker dataguardbroker-sample + + dataguardbroker.database.oracle.com/dataguardbroker-sample patched +``` + +### Enable Fast-Start Failover + +Oracle Data Guard Fast-Start Failover (FSFO) monitors your Oracle Data Guard environments and initiates an automatic failover in the case of an outage. +To enable FSFO, ensure the primary database is in the primary role, set the attribute `.spec.fastStartFailover` to `true` in [`dataguardbroker.yaml`](./../../config/samples/sidb/dataguardbroker.yaml), and then apply it. For example: + +```sh +$ kubectl apply -f dataguardbroker.yaml + + dataguardbroker.database.oracle.com/dataguardbroker-sample configured +``` + +You can also use the patch command: + +```sh +$ kubectl --type=merge -p '{"spec":{"fastStartFailover": true}}' patch dataguardbroker dataguardbroker-sample + + dataguardbroker.database.oracle.com/dataguardbroker-sample patched +``` + +Applying this results in the creation of a pod running the Observer. The Observer is a component of the DGMGRL interface, which monitors the availability of the primary database. + +**Note:** When the attribute `fastStartFailover` is `true`, then performing a switchover by specifying `setAsPrimaryDatabase` is not allowed. + +### Convert Standby to Snapshot Standby + +A snapshot standby is a fully updatable standby database that can be used for development and testing. It receives and archives, but does not apply, redo data from a primary database. 
The redo data received from the primary database is applied after a snapshot standby database is converted back into a physical standby database, and after discarding all local updates to the snapshot standby database. + +To convert a standby database to a snapshot standby, ensure Fast-Start Failover is disabled, and then set the attribute `.spec.convertToSnapshotStandby` to `true` in [`singleinstancedatabase.yaml`](./../../config/samples/sidb/singleinstancedatabase.yaml) before applying it. For example: + +```sh +$ kubectl apply -f singleinstancedatabase.yaml + + singleinstancedatabase.database.oracle.com/sidb-sample configured +``` + +You can also use the patch command: + +```sh +$ kubectl --type=merge -p '{"spec":{"convertToSnapshotStandby":true}}' patch singleinstancedatabase sidb-sample + + singleinstancedatabase.database.oracle.com/sidb-sample patched +``` + +### Static Data Guard Connect String + + External and internal (running in pods) applications can always connect to the database in the primary role by using `.status.externalConnectString` and `.status.clusterConnectString` of the Data Guard broker resource respectively. These connect strings are fixed for the Data Guard broker resource, and will not change on switchover or failover. The external connect string can be obtained using the following command: + + ```sh + $ kubectl get dataguardbroker dataguardbroker-sample -o "jsonpath={.status.externalConnectString}" + + 10.0.25.87:1521/DATAGUARD + ``` + This connect string will always automatically route to the database in the primary role. Client applications can be totally agnostic of the databases in the Oracle Data Guard configuration. Their number or host/IP details are not needed in the connect string. + +### Patch Primary and Standby databases + +Databases (both primary and standby) running in your cluster and managed by the Oracle Database operator can be patched between release updates of the same major release. 
+ +To patch an existing database, edit and apply the **[`config/samples/sidb/singleinstancedatabase_patch.yaml`](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file of the database resource/object either by specifying a new release update for image attributes, or by running the following command: + +```sh +kubectl --type=merge -p '{"spec":{"image":{"pullFrom":"patched-image:tag","pullSecrets":"pull-secret"}}}' patch singleinstancedatabase <database-name> + +``` +Follow these steps for patching databases configured with the Data Guard broker: +1. Ensure Fast-Start Failover is disabled by running the following command: +```sh + kubectl patch dataguardbroker dataguardbroker-sample -p '{"spec":{"fastStartFailover": false}}' --type=merge +``` +2. Patch all the standby databases by replacing the image with the new release update image. +3. Perform switchover of the primary to one of the standby databases. +4. Patch the original primary database (currently standby after step 3) + After step 3, the software for primary and standby databases is at the same release update +5. Bounce the current primary database by updating the replica count to 0 and then 1 + Step 5 will trigger a datapatch execution, which results in patching the datafiles +6. Finally, perform switch over of the current primary back to the original primary (current standby) + + +### Delete the Data Guard Configuration + +To delete a standby or primary database configured for Oracle Data Guard, delete the `dataguardbroker` resource. After that is done, delete the standby databases, and then finally the primary database. + +#### Delete DataguardBroker Resource +```sh +$ kubectl delete dataguardbroker dgbroker-sample + + dataguardbroker.database.oracle.com/dgbroker-sample deleted +``` + +**Note:** If a switchover to standby was performed, then ensure that you switch back to the original primary database before deleting the Data Guard broker resource. 
For example: +#### Delete Standby Database +```sh +$ kubectl delete singleinstancedatabase stdby-1 + + singleinstancedatabase.database.oracle.com "stdby-1" deleted +``` + +### Execute Custom Scripts + +You can set up custom scripts (SQL, shell scripts, or both) to run after the initial database setup, and to have scripts run after each startup of the database. SQL scripts will be executed as `sysdba`, and shell scripts will be executed as the current user. To ensure proper order, Oracle recommends that you prefix your scripts with a number. For example: `01_users.sql`, `02_permissions.sql`, and so on. To ensure that these scripts are available to run after setup or after each database startup, place all such scripts in setup and startup folders created in a persistent volume. + +Create a persistent volume by using [static provisioning](#static-persistence) and then specify the name of this volume with the `<.spec.persistence.scriptsVolumeName>` field which corresponds to the `scriptsVolumeName` field of the persistence section in the **[`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml)**. + +**Note:** Running custom scripts requires read and list access for persistent volumes, as mentioned in [prerequisites](#prerequisites) + +## OracleRestDataService Resource + +The Oracle Database Operator creates the `OracleRestDataService` as a custom resource. In this document, we will refer to `OracleRestDataService` as ORDS. Creating ORDS as a custom resource enables the RESTful API access to the Oracle Database in K8s, and enables it to be managed as a native Kubernetes object. 
+ +### Resource Details + +#### ORDS List +To list ORDS services, use the following command: + +```sh +$ kubectl get oraclerestdataservice -o name + + oraclerestdataservice.database.oracle.com/ords-sample + +``` + +#### Quick Status +To obtain a quick status check of the ORDS service, use the following command: + +```sh +$ kubectl get oraclerestdataservice ords-sample + +NAME STATUS DATABASE DATABASE API URL DATABASE ACTIONS URL APEX URL +ords-sample Healthy sidb-sample http://10.0.25.54:8181/ords/schema1/_/db-api/stable/ http://10.0.25.54:8181/ords/sql-developer http://10.0.25.54:8181/ords/apex + +``` + +#### Detailed Status +To obtain a detailed status check of the ORDS service, use the following command: + +```sh +$ kubectl describe oraclerestdataservice ords-sample + + Name: ords-sample + Namespace: default + Labels: + Annotations: + API Version: database.oracle.com/v1alpha1 + Kind: OracleRestDataService + Metadata: ... + Spec: ... + Status: + Cluster Db API URL: http://ords21c-1.default:8181/ords/schema1/_/db-api/stable/ + Database Actions URL: http://10.0.25.54:8181/ords/sql-developer + Database API URL: http://10.0.25.54:8181/ords/schema1/_/db-api/stable/ + Apex URL: http://10.0.25.54:8181/ords/apex + Database Ref: sidb21c-1 + Image: + Pull From: ... + Pull Secrets: ... + Load Balancer: true + Ords Installed: true + Persistence: + Access Mode: ReadWriteMany + Size: 100Gi + Storage Class: + Service IP: 10.0.25.54 + Status: Healthy + +``` + +### Template YAML + +The template `.yaml` file for Oracle Rest Data Services (`OracleRestDataService` kind), including all the configurable options, is available at **[config/samples/sidb/oraclerestdataservice.yaml](../../config/samples/sidb/oraclerestdataservice.yaml)**. 
+ +**Note:** +- The `adminPassword` and `ordsPassword` fields in the `oraclerestdataservice.yaml` file contain secrets for authenticating the Single Instance Database and the ORDS user with the following roles: `SQL Administrator, System Administrator, SQL Developer, oracle.dbtools.autorest.any.schema`. + +- If you want to install ORDS in a [prebuilt database](#provision-a-pre-built-database), then ensure that you attach the **database persistence** by uncommenting the `persistence` section in the **[`config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml`](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml)** file, while provisioning the prebuilt database. + +### REST Enable a Database + +#### Provision ORDS + +To quickly provision a new ORDS instance, use the example **[`config/samples/sidb/oraclerestdataservice_create.yaml`](../../config/samples/sidb/oraclerestdataservice_create.yaml)** file. For example: + +```sh +$ kubectl apply -f oraclerestdataservice_create.yaml + + oraclerestdataservice.database.oracle.com/ords-sample created +``` +After this command completes, ORDS is installed in the container database (CDB) of the Single Instance Database. + +##### Note: +You are required to specify the ORDS Secret in the [`oraclerestdataservice_create.yaml`](../../config/samples/sidb/oraclerestdataservice_create.yaml) file. The default value mentioned in the `adminPassword.secretName` field is `ords-secret`. You can create this Secret manually by using the following command: + +```bash +kubectl create secret generic ords-secret --from-literal=oracle_pwd= +``` + +Alternatively, you can create this Secret by filling the passwords in the **[`oraclerestdataservice_secrets.yaml`](../../config/samples/sidb/oraclerestdataservice_secrets.yaml)** file and applying it using the following command: + +```bash +kubectl apply -f oraclerestdataservice_secrets.yaml +``` + +#### Creation Status + +Creating a new ORDS instance takes a while. 
To check the status of the ORDS instance, use the following command: + +```sh +$ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.status}" + + Healthy +``` +ORDS is open for connections when the `status` column returns `Healthy`. + +#### REST Endpoints + +Clients can access the REST Endpoints using `.status.databaseApiUrl` as shown in the following command. + +```sh +$ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.databaseApiUrl}" + + http://10.0.25.54:8181/ords/schema1/_/db-api/stable/ +``` + +All the REST Endpoints can be found in [_REST APIs for Oracle Database_](https://docs.oracle.com/en/database/oracle/oracle-database/21/dbrst/rest-endpoints.html). + +There are two basic approaches for authentication to the REST Endpoints. Certain APIs are specific about which authentication method they will accept. + +#### Database API + +To call certain REST endpoints, you must use the Schema User, which is REST-Enabled with role `SQL Administrator`, and `.spec.ordsPassword` credentials. + +The Schema user also has the following additional roles: `System Administrator, SQL Developer`. 
+ +Use this Schema user to authenticate the following: +* Database APIs +* Any Protected AutoRest Enabled Object APIs +* Database Actions of any REST Enabled Schema + +##### Examples +Some examples for the Database API usage for REST-Enabled schema1 are as follows: +- **Get all Database Components** + ```sh + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/components/ | python -m json.tool + ``` +- **Get all Database Users** + ```sh + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/security/users/ | python -m json.tool + ``` +- **Get all Tablespaces** + ```sh + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/storage/tablespaces/ | python -m json.tool + ``` +- **Get all Database Parameters** + ```sh + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/parameters/ | python -m json.tool + ``` +- **Get all Feature Usage Statistics** + ```sh + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/feature_usage/ | python -m json.tool + ``` + +#### MongoDB API + +To enable the Database API for MongoDB, set `.spec.mongoDbApi` to `true`. When this is done, MongoDB applications are able to connect to Oracle Database using the MongoDB API Access URL. For example: + +```sh +$ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.mongoDbApiAccessUrl}" + + mongodb://[{user}:{password}@]10.0.25.54:27017/{user}?authMechanism=PLAIN&authSource=$external&ssl=true&retryWrites=false&loadBalanced=true +``` + +* Change [{user}:{password}@] to database username and password. 
Retain the @ symbol but remove all the brackets. +* Change the {user} later in the URL to database username as well. + +#### Advanced Usages + +##### Oracle Data Pump +The Oracle REST Data Services (ORDS) database API enables you to create Oracle Data Pump export and import jobs by using REST web service calls. + +REST APIs for Oracle Data Pump Jobs can be found at [https://docs.oracle.com/en/database/oracle/oracle-database/21/dbrst/op-database-datapump-jobs-post.html](https://docs.oracle.com/en/database/oracle/oracle-database/21/dbrst/op-database-datapump-jobs-post.html). +##### REST Enabled SQL + +The REST-Enabled SQL functionality is available to all of the schemas specified in the `.spec.restEnableSchemas` attribute of the example yaml in the sample folder. Only these schemas will have access to the SQL Developer Web Console specified by the Database Actions URL. + +The REST-Enabled SQL functionality enables REST calls to send DML, DDL and scripts to any REST-Enabled schema by exposing the same SQL engine used in SQL Developer and Oracle SQLcl (SQL Developer Command Line). + +For example: + +**Run a Script:** + +Create a file called "/tmp/table.sql" with the following contents. 
+ +```sh + CREATE TABLE DEPT ( + DEPTNO NUMBER(2) CONSTRAINT PK_DEPT PRIMARY KEY, + DNAME VARCHAR2(14), + LOC VARCHAR2(13) + ) ; + + INSERT INTO DEPT VALUES (10,'ACCOUNTING','NEW YORK'); + INSERT INTO DEPT VALUES (20,'RESEARCH','DALLAS'); + INSERT INTO DEPT VALUES (30,'SALES','CHICAGO'); + INSERT INTO DEPT VALUES (40,'OPERATIONS','BOSTON'); + COMMIT; +``` + +Run the following API to run the script created in the previous example: + +```sh + curl -s -k -X "POST" "http://10.0.25.54:8181/ords/<.spec.restEnableSchemas[].urlMapping>/_/sql" \ + -H "Content-Type: application/sql" \ + -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' \ + -d @/tmp/table.sql +``` + +**Basic Call:** + +Fetch all entries from 'DEPT' table by calling the following API + +```sh + curl -s -k -X "POST" "http://10.0.25.54:8181/ords/<.spec.restEnableSchemas[].urlMapping>/_/sql" \ + -H "Content-Type: application/sql" \ + -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' \ + -d $'select * from dept;' | python -m json.tool +``` + +**Note:** `.spec.restEnableSchema[].urlMapping` is optional and is defaulted to `.spec.restEnableSchemas[].schemaName` + +##### Database Actions + +Database Actions is a web-based interface that uses Oracle REST Data Services to provide development, data tools, administration and monitoring features for Oracle Database. + +* To use Database Actions, you must sign in as a database user whose schema has been REST-enabled. +* To enable a schema for REST, you can specify appropriate values for the `.spec.restEnableSchemas` attributes details in the sample `yaml` **[config/samples/sidb/oraclerestdataservice.yaml](../../config/samples/sidb/oraclerestdataservice.yaml)**, which are needed for authorizing Database Actions. +* Schema are created (if they exist) with the username as `.spec.restEnableSchema[].schema` and password as `.spec.ordsPassword.`. 
+* UrlMapping `.spec.restEnableSchema[].urlMapping` is optional and is defaulted to `.spec.restEnableSchema[].schema`. + +Database Actions can be accessed with a browser by using `.status.databaseActionsUrl`. For example: + +```sh +$ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.databaseActionsUrl}" + + http://10.0.25.54:8181/ords/sql-developer +``` + +To access Database Actions, sign in by using the following code as a database user whose schema has been REST-Enabled: + +* Login Page: \ +Username: `.spec.restEnableSchemas[].schemaName` \ +Password: `.spec.ordsPassword` + +![database-actions-home](/images/sidb/database-actions-home.png) + +For more information about Database Actions, see: [Oracle Database Actions](https://docs.oracle.com/en/database/oracle/sql-developer-web/21.2/index.html). + +### APEX Installation + +Oracle APEX is a low-code development platform that enables developers to build scalable, secure enterprise apps, with world-class features that can be deployed anywhere. + +Using APEX, developers can quickly develop and deploy compelling apps that solve real problems and provide immediate value. Developers won't need to be an expert in a vast array of technologies to deliver sophisticated solutions. Focus on solving the problem and let APEX take care of the rest. + +The `OraOperator` facilitates installation of APEX in the database and also configures ORDS for it. + +* Status of APEX configuration can be checked using the following command: + + ```sh + $ kubectl get oraclerestdataservice ords-sample -o "jsonpath={.status.apexConfigured}" + + [true] + ``` + +Application Express can be accessed via browser using `.status.apexUrl` in the following command. 
+ +```sh +$ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.apexUrl}" + + http://10.0.25.54:8181/ords/apex +``` + +Sign in to Administration services using \ +workspace: `INTERNAL` \ +username: `ADMIN` \ +password: `Welcome_1` + +![application-express-admin-home](/images/sidb/application-express-admin-home.png) + +**Note:** +- Oracle strongly recommends that you change the default APEX admin password. +- By default, the full development environment is initialized in APEX. After deployment, you can change it manually to the runtime environment. To change environments, run the script `apxdevrm.sql` after connecting to the primary database from the ORDS pod as the `SYS` user with `SYSDBA` privilege. For detailed instructions, see: [Converting a Full Development Environment to a Runtime Environment](https://docs.oracle.com/en/database/oracle/application-express/21.2/htmig/converting-between-runtime-and-full-development-environments.html#GUID-B0621B40-3441-44ED-9D86-29B058E26BE9). + +### Delete ORDS +- To delete ORDS, run the following command: + + kubectl delete oraclerestdataservice ords-sample + +- You cannot delete the referred Database before deleting its ORDS resource. +- APEX, if installed, also gets uninstalled from the database when ORDS gets deleted. + +## Maintenance Operations +If you need to perform some maintenance operations (Database/ORDS) manually, then the procedure is as follows: +1. Use `kubectl exec` to access the pod where you want to perform the manual operation, a command similar to the following: + + kubectl exec -it /bin/bash + +2. The important locations, such as ORACLE_HOME, ORDS_HOME, and so on, can be found in the environment, by using the `env` command. + +3. 
Log In to `sqlplus` to perform manual operations by using the following command: + + sqlplus / as sysdba + +## Additional information +Detailed instructions for setting up Single Instance Database by OraOperator using OCI free trial account is available now in the LiveLab format. Please use the following link: + [https://oracle.github.io/cloudtestdrive/AppDev/database-operator/workshops/freetier/?lab=introduction](https://oracle.github.io/cloudtestdrive/AppDev/database-operator/workshops/freetier/?lab=introduction) + +Thanks, [Jan Leemans](https://github.com/janleemans), for this effort!! diff --git a/go.mod b/go.mod index fada426a..863f2e99 100644 --- a/go.mod +++ b/go.mod @@ -1,16 +1,106 @@ module github.com/oracle/oracle-database-operator -go 1.16 +go 1.23.3 require ( - github.com/go-logr/logr v0.4.0 - github.com/onsi/ginkgo v1.16.4 - github.com/onsi/gomega v1.13.0 - github.com/oracle/oci-go-sdk/v45 v45.2.0 - gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.21.2 - k8s.io/apimachinery v0.21.2 - k8s.io/client-go v0.21.2 - sigs.k8s.io/controller-runtime v0.9.2 - sigs.k8s.io/yaml v1.2.0 + github.com/go-logr/logr v1.4.2 + github.com/onsi/ginkgo/v2 v2.20.2 + github.com/onsi/gomega v1.34.2 + github.com/oracle/oci-go-sdk/v65 v65.77.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 + go.uber.org/zap v1.26.0 + golang.org/x/text v0.19.0 + gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/cli-runtime v0.31.3 + k8s.io/client-go v0.31.3 + k8s.io/kubectl v0.31.3 + sigs.k8s.io/controller-runtime v0.19.3 + sigs.k8s.io/yaml v1.4.0 +) + +require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/fatih/camelcase v1.0.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + 
github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sony/gobreaker v0.5.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.24.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/apiextensions-apiserver v0.31.2 // indirect + k8s.io/component-base v0.31.3 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // 
indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 67d9f3df..d23debb8 100644 --- a/go.sum +++ b/go.sum @@ -1,741 +1,335 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/bbolt 
v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod 
h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= -github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer 
v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= 
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp 
v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod 
h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= 
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= -github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/oracle/oci-go-sdk/v45 v45.2.0 h1:vCPoQlE+DOrM2heJn66rvPU6fbsc/0Cxtzs2jnFut6U= -github.com/oracle/oci-go-sdk/v45 v45.2.0/go.mod h1:ZM6LGiRO5TPQJxTlrXbcHMbClE775wnGD5U/EerCsRw= -github.com/pascaldekloe/goe 
v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/oracle/oci-go-sdk/v65 v65.77.1 h1:gqjTXIUWvTihkn470AclxSAMcR1JecqjD2IUtp+sDIU= +github.com/oracle/oci-go-sdk/v65 v65.77.1/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 h1:SyoVBXD/r0PntR1rprb90ClI32FSUNOCWqqTatnipHM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= 
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= 
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= +github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod 
h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod 
h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= 
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= -k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= -k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= -k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= -k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= -k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= -k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= -k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= -k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= -k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= -k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4= -k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod 
h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.9.2 h1:MnCAsopQno6+hI9SgJHKddzXpmv2wtouZz6931Eax+Q= -sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.31.3 h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI= 
+k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.31.3 h1:3r111pCjPsvnR98oLLxDMwAeM6OPGmPty6gSKaLTQes= +k8s.io/kubectl v0.31.3/go.mod h1:lhMECDCbJN8He12qcKqs2QfmVo9Pue30geovBVpH5fs= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 
v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index dd467e1f..1a8fcae1 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** diff --git a/image.png b/image.png new file mode 100644 index 00000000..8bd1bbd5 Binary files /dev/null and b/image.png differ diff --git a/images/adb/acd-id-1.png b/images/adb/acd-id-1.png new file mode 100644 index 00000000..c8e422d5 Binary files /dev/null and b/images/adb/acd-id-1.png differ diff --git a/images/adb/acd-id-2.png b/images/adb/acd-id-2.png new file mode 100644 index 00000000..1680d34d Binary files /dev/null and b/images/adb/acd-id-2.png differ diff --git a/images/adb/adb-id-1.png b/images/adb/adb-id-1.png index 4cbf8c5a..7c2ab1f1 100644 Binary files a/images/adb/adb-id-1.png and b/images/adb/adb-id-1.png differ diff --git a/images/adb/aei-id-1.png b/images/adb/aei-id-1.png new file mode 100644 index 00000000..d407a624 Binary files /dev/null and b/images/adb/aei-id-1.png differ diff --git a/images/adb/aei-id-2.png b/images/adb/aei-id-2.png new file mode 100644 index 00000000..534b7d35 Binary files /dev/null and b/images/adb/aei-id-2.png differ diff --git a/images/adb/instance-principal-2.png b/images/adb/instance-principal-2.png index 31afe00f..d036a7e9 100644 Binary files a/images/adb/instance-principal-2.png and b/images/adb/instance-principal-2.png differ diff --git a/images/adb/instance-principal-3.png b/images/adb/instance-principal-3.png index 602d209a..bc1a3160 100644 Binary files a/images/adb/instance-principal-3.png and b/images/adb/instance-principal-3.png differ diff --git 
a/images/adb/instance-principal-4.png b/images/adb/instance-principal-4.png new file mode 100644 index 00000000..44bd49a9 Binary files /dev/null and b/images/adb/instance-principal-4.png differ diff --git a/images/adb/instance-principal-5.png b/images/adb/instance-principal-5.png new file mode 100644 index 00000000..602d209a Binary files /dev/null and b/images/adb/instance-principal-5.png differ diff --git a/images/sidb/application-express-admin-home.png b/images/sidb/application-express-admin-home.png new file mode 100644 index 00000000..0d581d40 Binary files /dev/null and b/images/sidb/application-express-admin-home.png differ diff --git a/images/sidb/database-actions-home.png b/images/sidb/database-actions-home.png new file mode 100644 index 00000000..90796d36 Binary files /dev/null and b/images/sidb/database-actions-home.png differ diff --git a/main.go b/main.go index 9e68368f..ee9992b7 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -39,19 +39,37 @@ package main import ( + "context" "flag" + "fmt" "os" "strconv" + "strings" + "time" + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + + "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" databasecontroller "github.com/oracle/oracle-database-operator/controllers/database" + dataguardcontroller "github.com/oracle/oracle-database-operator/controllers/dataguard" + + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" + observabilityv1 "github.com/oracle/oracle-database-operator/apis/observability/v1" + observabilityv1alpha1 "github.com/oracle/oracle-database-operator/apis/observability/v1alpha1" + observabilityv4 "github.com/oracle/oracle-database-operator/apis/observability/v4" + observabilitycontroller "github.com/oracle/oracle-database-operator/controllers/observability" // +kubebuilder:scaffold:imports ) @@ -62,8 +80,12 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - + utilruntime.Must(observabilityv1alpha1.AddToScheme(scheme)) + utilruntime.Must(monitorv1.AddToScheme(scheme)) utilruntime.Must(databasev1alpha1.AddToScheme(scheme)) + utilruntime.Must(databasev4.AddToScheme(scheme)) + utilruntime.Must(observabilityv1.AddToScheme(scheme)) + utilruntime.Must(observabilityv4.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -76,28 +98,79 @@ func main() { "Enabling this will ensure there is 
only one active controller manager.") flag.Parse() - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + // Initialize new logger Opts + options := &zap.Options{ + Development: true, + TimeEncoder: zapcore.RFC3339TimeEncoder, + } + + ctrl.SetLogger(zap.New(func(o *zap.Options) { *o = *options })) + + watchNamespaces, err := getWatchNamespace() + if err != nil { + setupLog.Error(err, "Failed to get watch namespaces") + os.Exit(1) + } + opt := ctrl.Options{ + Scheme: scheme, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + }, + LeaderElection: enableLeaderElection, + LeaderElectionID: "a9d608ea.oracle.com", + NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) { + opts.DefaultNamespaces = watchNamespaces + return cache.New(config, opts) + }, + } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - Port: 9443, - LeaderElection: enableLeaderElection, - LeaderElectionID: "a9d608ea.oracle.com", - }) + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opt) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } + // Get Cache + cache := mgr.GetCache() + + // ADB family controllers if err = (&databasecontroller.AutonomousDatabaseReconciler{ KubeClient: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("database").WithName("AutonomousDatabase"), Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("AutonomousDatabase"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "AutonomousDatabase") os.Exit(1) } + if err = (&databasecontroller.AutonomousDatabaseBackupReconciler{ + KubeClient: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AutonomousDatabaseBackup"), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("AutonomousDatabaseBackup"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", 
"AutonomousDatabaseBackup") + os.Exit(1) + } + if err = (&databasecontroller.AutonomousDatabaseRestoreReconciler{ + KubeClient: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AutonomousDatabaseRestore"), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("AutonomousDatabaseRestore"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AutonomousDatabaseRestore") + os.Exit(1) + } + if err = (&databasecontroller.AutonomousContainerDatabaseReconciler{ + KubeClient: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AutonomousContainerDatabase"), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("AutonomousContainerDatabase"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AutonomousContainerDatabase") + os.Exit(1) + } + if err = (&databasecontroller.SingleInstanceDatabaseReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("database").WithName("SingleInstanceDatabase"), @@ -117,6 +190,26 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "ShardingDatabase") os.Exit(1) } + if err = (&databasecontroller.DbcsSystemReconciler{ + KubeClient: mgr.GetClient(), + Logger: ctrl.Log.WithName("controllers").WithName("database").WithName("DbcsSystem"), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("DbcsSystem"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "DbcsSystem") + os.Exit(1) + } + if err = (&databasecontroller.OracleRestDataServiceReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("OracleRestDataService"), + Scheme: mgr.GetScheme(), + Config: mgr.GetConfig(), + Recorder: mgr.GetEventRecorderFor("OracleRestDataService"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", 
"controller", "OracleRestDataService") + os.Exit(1) + } + // Set RECONCILE_INTERVAL environment variable if you want to change the default value from 15 secs interval := os.Getenv("RECONCILE_INTERVAL") i, err := strconv.ParseInt(interval, 10, 64) @@ -131,13 +224,243 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "SingleInstanceDatabase") os.Exit(1) } + if err = (&databasev1alpha1.OracleRestDataService{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "OracleRestDataService") + os.Exit(1) + } + if err = (&databasev4.PDB{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "PDB") + os.Exit(1) + } + if err = (&databasev4.LRPDB{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "LRPDB") + os.Exit(1) + } + if err = (&databasev4.CDB{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "CDB") + os.Exit(1) + } + if err = (&databasev4.LREST{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "LREST") + os.Exit(1) + } + if err = (&databasev1alpha1.AutonomousDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabase") + os.Exit(1) + } + if err = (&databasev1alpha1.AutonomousDatabaseBackup{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabaseBackup") + os.Exit(1) + } + if err = (&databasev1alpha1.AutonomousDatabaseRestore{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabaseRestore") + os.Exit(1) + } + if err = (&databasev1alpha1.AutonomousContainerDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", 
"webhook", "AutonomousContainerDatabase") + os.Exit(1) + } + if err = (&databasev4.AutonomousDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabase") + os.Exit(1) + } + if err = (&databasev4.AutonomousDatabaseBackup{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabaseBackup") + os.Exit(1) + } + if err = (&databasev4.AutonomousDatabaseRestore{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabaseRestore") + os.Exit(1) + } + if err = (&databasev4.AutonomousContainerDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousContainerDatabase") + os.Exit(1) + } + if err = (&databasev1alpha1.DataguardBroker{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DataguardBroker") + os.Exit(1) + } + if err = (&databasev1alpha1.ShardingDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ShardingDatabase") + os.Exit(1) + } + if err = (&databasev1alpha1.DbcsSystem{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DbcsSystem") + os.Exit(1) + } + if err = (&databasev4.ShardingDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ShardingDatabase") + } + if err = (&observabilityv1alpha1.DatabaseObserver{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseObserver") + os.Exit(1) + } + if err = (&databasev1alpha1.DbcsSystem{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DbcsSystem") + os.Exit(1) + } + if err = 
(&databasev4.DbcsSystem{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DbcsSystem") + os.Exit(1) + } + if err = (&observabilityv1.DatabaseObserver{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseObserver") + os.Exit(1) + } + + if err = (&observabilityv4.DatabaseObserver{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseObserver") + os.Exit(1) + } + } + + // PDB Reconciler + if err = (&databasecontroller.PDBReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("PDB"), + Interval: time.Duration(i), + Recorder: mgr.GetEventRecorderFor("PDB"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "PDB") + os.Exit(1) + } + + // LRPDBR Reconciler + if err = (&databasecontroller.LRPDBReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("LRPDB"), + Interval: time.Duration(i), + Recorder: mgr.GetEventRecorderFor("LRPDB"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LRPDB") + os.Exit(1) + } + + // CDB Reconciler + if err = (&databasecontroller.CDBReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Config: mgr.GetConfig(), + Log: ctrl.Log.WithName("controllers").WithName("CDB"), + Interval: time.Duration(i), + Recorder: mgr.GetEventRecorderFor("CDB"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "CDB") + os.Exit(1) } + // LREST Reconciler + if err = (&databasecontroller.LRESTReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Config: mgr.GetConfig(), + Log: ctrl.Log.WithName("controllers").WithName("LREST"), + Interval: time.Duration(i), + Recorder: 
mgr.GetEventRecorderFor("LREST"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LREST") + os.Exit(1) + } + + if err = (&dataguardcontroller.DataguardBrokerReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("dataguard").WithName("DataguardBroker"), + Scheme: mgr.GetScheme(), + Config: mgr.GetConfig(), + Recorder: mgr.GetEventRecorderFor("DataguardBroker"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "DataguardBroker") + os.Exit(1) + } + + if err = (&databasecontroller.OrdsSrvsReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + // Config: mgr.GetConfig(), + Recorder: mgr.GetEventRecorderFor("OrdsSrvs"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OrdsSrvs") + } + + // Observability DatabaseObserver Reconciler + if err = (&observabilitycontroller.DatabaseObserverReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("observability").WithName("DatabaseObserver"), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("DatabaseObserver"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "DatabaseObserver") + os.Exit(1) + } + + if err = (&databasev4.SingleInstanceDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "SingleInstanceDatabase") + os.Exit(1) + } + if err = (&databasev4.DataguardBroker{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DataguardBroker") + os.Exit(1) + } + if err = (&databasev4.OracleRestDataService{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "OracleRestDataService") + os.Exit(1) + } // +kubebuilder:scaffold:builder + // Add 
index for PDB CR to enable mgr to cache PDBs + indexFunc := func(obj client.Object) []string { + return []string{obj.(*databasev4.PDB).Spec.PDBName} + } + if err = cache.IndexField(context.TODO(), &databasev4.PDB{}, "spec.pdbName", indexFunc); err != nil { + setupLog.Error(err, "unable to create index function for ", "controller", "PDB") + os.Exit(1) + } + + indexFunc2 := func(obj client.Object) []string { + return []string{obj.(*databasev4.LRPDB).Spec.LRPDBName} + } + if err = cache.IndexField(context.TODO(), &databasev4.LRPDB{}, "spec.pdbName", indexFunc2); err != nil { + setupLog.Error(err, "unable to create index function for ", "controller", "LRPDB") + os.Exit(1) + } + setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) } } + +func getWatchNamespace() (map[string]cache.Config, error) { + // WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE + // which specifies the Namespace to watch. + // An empty value means the operator is running with cluster scope. 
+ + var watchNamespaceEnvVar = "WATCH_NAMESPACE" + var nsmap map[string]cache.Config + ns, found := os.LookupEnv(watchNamespaceEnvVar) + values := strings.Split(ns, ",") + if len(values) == 1 && values[0] == "" { + fmt.Printf(":CLUSTER SCOPED:\n") + return nil, nil + } + fmt.Printf(":NAMESPACE SCOPED:\n") + fmt.Printf("WATCH LIST=%s\n", values) + nsmap = make(map[string]cache.Config, len(values)) + if !found { + return nsmap, fmt.Errorf("%s must be set", watchNamespaceEnvVar) + } + + if ns == "" { + return nil, nil + } + + for _, ns := range values { + nsmap[ns] = cache.Config{} + } + + return nsmap, nil + +} diff --git a/oracle-database-operator.yaml b/oracle-database-operator.yaml index ae464a57..1179b272 100644 --- a/oracle-database-operator.yaml +++ b/oracle-database-operator.yaml @@ -9,126 +9,72 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: autonomousdatabases.database.oracle.com + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: autonomouscontainerdatabases.database.oracle.com spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 group: database.oracle.com names: - kind: AutonomousDatabase - listKind: AutonomousDatabaseList - plural: autonomousdatabases + kind: AutonomousContainerDatabase + listKind: AutonomousContainerDatabaseList + plural: autonomouscontainerdatabases shortNames: - - adb - - adbs - singular: autonomousdatabase + - acd + - acds + singular: autonomouscontainerdatabase scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .status.displayName - name: Display Name + - jsonPath: .spec.displayName + name: 
DisplayName type: string - jsonPath: .status.lifecycleState name: State type: string - - jsonPath: .status.isDedicated - name: Dedicated - type: string - - jsonPath: .status.cpuCoreCount - name: OCPUs - type: integer - - jsonPath: .status.dataStorageSizeInTBs - name: Storage (TB) - type: integer - - jsonPath: .status.dbWorkload - name: Workload Type - type: string - jsonPath: .status.timeCreated name: Created type: string name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabase is the Schema for the autonomousdatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: 'AutonomousDatabaseSpec defines the desired state of AutonomousDatabase Important: Run "make" to regenerate code after modifying this file' properties: - details: - description: AutonomousDatabaseDetails defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase - properties: - adminPassword: - properties: - k8sSecretName: - type: string - ociSecretOCID: - type: string - type: object - autonomousDatabaseOCID: - type: string - compartmentOCID: - type: string - cpuCoreCount: - type: integer - dataStorageSizeInTBs: - type: integer - dbName: - type: string - dbVersion: - type: string - dbWorkload: - description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying type: string' - enum: - - OLTP - - DW - - AJD - - APEX - type: string - displayName: - type: string - freeformTags: - additionalProperties: - type: string - type: object - isAutoScalingEnabled: - type: boolean - isDedicated: - type: boolean - lifecycleState: - description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying type: string' - type: string - nsgOCIDs: - items: - type: string - type: array - privateEndpoint: - type: string - privateEndpointIP: - type: string - privateEndpointLabel: - type: string - subnetOCID: - type: string - wallet: - properties: - name: - type: string - password: - properties: - k8sSecretName: - type: string - ociSecretOCID: - type: string - type: object - type: object + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + type: string + autonomousExadataVMClusterOCID: + type: string + compartmentOCID: + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string type: object hardLink: default: false @@ -140,732 +86,13595 @@ spec: secretName: type: string type: object - 
required: - - details + patchModel: + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS + type: string type: object status: - description: AutonomousDatabaseStatus defines the observed state of AutonomousDatabase properties: - cpuCoreCount: - type: integer - dataStorageSizeInTBs: - type: integer - dbWorkload: - description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying type: string' + lifecycleState: + type: string + timeCreated: + type: string + required: + - lifecycleState + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: DisplayName + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + type: string + autonomousExadataVMClusterOCID: + type: string + compartmentOCID: type: string displayName: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file' type: string - isDedicated: + freeformTags: + additionalProperties: + type: string + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + patchModel: + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS type: string + type: object + status: + properties: lifecycleState: - description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying type: string' type: string timeCreated: type: string + required: + - lifecycleState type: object type: object served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: 
"" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: shardingdatabases.database.oracle.com + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: autonomousdatabasebackups.database.oracle.com spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 group: database.oracle.com names: - kind: ShardingDatabase - listKind: ShardingDatabaseList - plural: shardingdatabases - singular: shardingdatabase + kind: AutonomousDatabaseBackup + listKind: AutonomousDatabaseBackupList + plural: autonomousdatabasebackups + shortNames: + - adbbu + - adbbus + singular: autonomousdatabasebackup scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.dbDisplayName + name: DB DisplayName + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.timeStarted + name: Started + type: string + - jsonPath: .status.timeEnded + name: Ended + type: string + name: v1alpha1 schema: openAPIV3Schema: - description: ShardingDatabase is the Schema for the shardingdatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: ShardingDatabaseSpec defines the desired state of ShardingDatabase properties: - catalog: - items: - description: CatalogSpec defines the desired state of CatalogSpec - properties: - envVars: - items: - description: EnvironmentVariable represents a named variable accessible for containers. - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a container image - type: string - isDelete: - type: boolean - label: - type: string - name: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - pvAnnotations: - additionalProperties: + autonomousDatabaseBackupOCID: + type: string + displayName: + type: string + isLongTermBackup: + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + retentionPeriodInDays: + type: integer + target: + properties: + k8sADB: + properties: + name: type: string - type: object - pvMatchLabels: - additionalProperties: + type: object + ociADB: + properties: + ocid: type: string - type: object - pvcName: - type: string - resources: - description: ResourceRequirements describes the compute resource requirements. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - storageSizeInGb: - format: int32 - type: integer - required: - - name - type: object - type: array - dbImage: + type: object + type: object + type: object + status: + properties: + autonomousDatabaseOCID: type: string - dbImagePullSecret: + compartmentOCID: type: string - gsm: - items: - description: GsmSpec defines the desired state of GsmSpec - properties: - envVars: - items: - description: EnvironmentVariable represents a named variable accessible for containers. - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a container image - type: string - isDelete: - type: boolean - label: - type: string - name: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - pvMatchLabels: - additionalProperties: - type: string - type: object - pvcName: - type: string - replicas: - format: int32 - type: integer - resources: - description: ResourceRequirements describes the compute resource requirements. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - storageSizeInGb: - format: int32 - type: integer - required: - - name - type: object - type: array - gsmImage: + dbDisplayName: type: string - gsmImagePullSecret: + dbName: type: string - isClone: - type: boolean - isDataGuard: - type: boolean - isDebug: - type: boolean - isDeleteOraPvc: - type: boolean - isExternalSvc: + isAutomatic: type: boolean - namespace: - type: string - nsConfigMap: - type: string - nsSecret: - type: string - portMappings: - items: - description: PortMapping is a specification of port mapping for an application deployment. 
- properties: - port: - format: int32 - type: integer - protocol: - default: TCP - type: string - targetPort: - format: int32 - type: integer - required: - - port - - protocol - - targetPort - type: object - type: array - scriptsLocation: + lifecycleState: type: string - secret: + timeEnded: type: string - shard: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file' - items: - description: ShardSpec is a specification of Shards for an application deployment. - properties: - envVars: - items: - description: EnvironmentVariable represents a named variable accessible for containers. - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a container image - type: string - isDelete: - type: boolean - label: - type: string - name: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - pvAnnotations: - additionalProperties: - type: string - type: object - pvMatchLabels: - additionalProperties: - type: string - type: object - pvcName: - type: string - resources: - description: ResourceRequirements describes the compute resource requirements. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - storageSizeInGb: - format: int32 - type: integer - required: - - name - type: object - type: array - stagePvcName: + timeStarted: type: string - storageClass: + type: type: string required: - - catalog - - dbImage - - gsm - - gsmImage - - secret - - shard - type: object - status: - description: To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 ShardingDatabaseStatus defines the observed state of ShardingDatabase - properties: - catalogs: - additionalProperties: - type: string - type: object - conditions: - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - gsm: - properties: - details: - additionalProperties: - type: string - type: object - externalConnectStr: - type: string - internalConnectStr: - type: string - services: - type: string - shards: - additionalProperties: - type: string - type: object - state: - type: string - type: object - shards: - additionalProperties: - type: string - type: object + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.6.1 - name: singleinstancedatabases.database.oracle.com -spec: - group: database.oracle.com - names: - kind: SingleInstanceDatabase - listKind: SingleInstanceDatabaseList - plural: singleinstancedatabases - singular: singleinstancedatabase - scope: Namespaced - versions: - additionalPrinterColumns: - - jsonPath: .status.edition - name: Edition - type: string - - jsonPath: .status.status - name: Status - type: string - - jsonPath: .status.role - name: Role - priority: 1 + - jsonPath: 
.status.lifecycleState + name: State type: string - - jsonPath: .status.releaseUpdate - name: Version + - jsonPath: .status.dbDisplayName + name: DB DisplayName type: string - - jsonPath: .status.connectString - name: Connect Str + - jsonPath: .status.type + name: Type type: string - - jsonPath: .status.pdbConnectString - name: Pdb Connect Str - priority: 1 + - jsonPath: .status.timeStarted + name: Started type: string - - jsonPath: .status.oemExpressUrl - name: Oem Express Url + - jsonPath: .status.timeEnded + name: Ended type: string - name: v1alpha1 + name: v4 schema: openAPIV3Schema: - description: SingleInstanceDatabase is the Schema for the singleinstancedatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase properties: - adminPassword: - description: SingleInsatnceAdminPassword defines the secret containing Admin Password mapped to secretKey for Database - properties: - keepSecret: - type: boolean - secretKey: - type: string - secretName: - type: string - required: - - secretKey - - secretName - type: object - archiveLog: - type: boolean - charset: + autonomousDatabaseBackupOCID: type: string - cloneFrom: - type: string - edition: - enum: - - standard - - enterprise + displayName: type: string - flashBack: - type: boolean - forceLog: + isLongTermBackup: type: boolean - image: - description: SingleInstanceDatabaseImage defines the Image source and pullSecrets for POD + ociConfig: properties: - pullFrom: - type: string - pullSecrets: + configMapName: type: string - version: + secretName: type: string - required: - - pullFrom - type: object - initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters - properties: - cpuCount: - type: integer - pgaAggregateTarget: - type: integer - processes: - type: integer - sgaTarget: - type: integer type: object - installApex: - type: boolean - loadBalancer: - type: boolean - nodeSelector: - additionalProperties: - type: string - type: object - pdbName: - type: string - persistence: - description: SingleInstanceDatabasePersistence defines the storage size and class for PVC + retentionPeriodInDays: + type: integer + target: properties: - accessMode: - enum: - - ReadWriteOnce - - ReadWriteMany - type: string - size: - type: string - storageClass: - type: string - required: - - accessMode - - size - - storageClass + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object type: object - 
readinessCheckPeriod: - type: integer - replicas: - minimum: 1 - type: integer - sid: - description: SID can only have a-z , A-Z, 0-9 . It cant have any special characters - pattern: ^[a-zA-Z0-9]+$ - type: string - required: - - adminPassword - - image - - persistence - - replicas type: object status: - description: SingleInstanceDatabaseStatus defines the observed state of SingleInstanceDatabase properties: - apexInstalled: - type: boolean - archiveLog: - type: string - charset: - type: string - cloneFrom: + autonomousDatabaseOCID: type: string - clusterConnectString: + compartmentOCID: type: string - conditions: - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - connectString: + dbDisplayName: type: string - datafilesCreated: + dbName: type: string - datafilesPatched: + isAutomatic: + type: boolean + lifecycleState: type: string - edition: + timeEnded: type: string - flashBack: + timeStarted: type: string - forceLog: + type: type: string - initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters + required: + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: autonomousdatabaserestores.database.oracle.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 + group: database.oracle.com + names: + kind: AutonomousDatabaseRestore + listKind: AutonomousDatabaseRestoreList + plural: autonomousdatabaserestores + shortNames: + - adbr + - adbrs + singular: autonomousdatabaserestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: 
DbName + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ociConfig: properties: - cpuCount: - type: integer - pgaAggregateTarget: - type: integer - processes: - type: integer - sgaTarget: - type: integer + configMapName: + type: string + secretName: + type: string type: object - initPgaSize: - type: integer - initSgaSize: - type: integer - nodes: - items: - type: string - type: array - oemExpressUrl: - type: string - ordsReference: + source: + properties: + k8sADBBackup: + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + type: string + type: object + type: object + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + properties: + dbName: type: string - pdbConnectString: + displayName: type: string - pdbName: + status: type: string - persistence: - description: SingleInstanceDatabasePersistence defines the storage size and class for PVC - properties: - accessMode: - enum: - - ReadWriteOnce - - ReadWriteMany - type: string - size: - type: string - storageClass: - type: string - required: - - accessMode - - size - - storageClass - type: object - releaseUpdate: + timeAccepted: type: string - replicas: - type: integer - role: + timeEnded: type: string - sid: + timeStarted: type: string - standbyDatabases: - additionalProperties: - type: string - type: object - status: + workRequestOCID: type: string required: - - persistence - - replicas + - dbName + - displayName + - status + - workRequestOCID type: object type: object served: true - storage: true + storage: false subresources: - scale: - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: 
[] - storedVersions: [] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: oracle-database-operator-leader-election-role + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: DbName + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + source: + properties: + k8sADBBackup: + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + type: string + type: object + type: object + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + properties: + dbName: + type: string + displayName: + type: string + status: + type: string + timeAccepted: + type: string + timeEnded: + type: string + timeStarted: + type: string + workRequestOCID: + type: string + required: + - dbName + - displayName + - status + - workRequestOCID + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: autonomousdatabases.database.oracle.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 + group: database.oracle.com + names: + kind: 
AutonomousDatabase + listKind: AutonomousDatabaseList + plural: autonomousdatabases + shortNames: + - adb + - adbs + singular: autonomousdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.details.displayName + name: Display Name + type: string + - jsonPath: .spec.details.dbName + name: Db Name + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .spec.details.isDedicated + name: Dedicated + type: string + - jsonPath: .spec.details.cpuCoreCount + name: OCPUs + type: integer + - jsonPath: .spec.details.dataStorageSizeInTBs + name: Storage (TB) + type: integer + - jsonPath: .spec.details.dbWorkload + name: Workload Type + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone + type: string + clone: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + cloneType: + enum: + - FULL + - METADATA + type: string + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + 
isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + details: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + id: + type: string + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + 
ociSecret: + properties: + id: + type: string + type: object + type: object + type: object + required: + - action + type: object + status: + properties: + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: object + type: array + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lifecycleState: + type: string + timeCreated: + type: string + walletExpiringDate: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.details.displayName + name: Display Name + type: string + - jsonPath: .spec.details.dbName + name: Db Name + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .spec.details.isDedicated + name: Dedicated + type: string + - jsonPath: .spec.details.cpuCoreCount + name: OCPUs + type: integer + - jsonPath: .spec.details.dataStorageSizeInTBs + name: Storage (TB) + type: integer + - jsonPath: .spec.details.dbWorkload + name: Workload Type + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v4 + schema: + openAPIV3Schema: + 
properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone + type: string + clone: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + cloneType: + enum: + - FULL + - METADATA + type: string + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + details: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + compartmentId: + type: string + computeCount: + 
type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + id: + type: string + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + type: object + required: + - action + type: object + status: + properties: + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: object + type: array + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 
316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lifecycleState: + type: string + timeCreated: + type: string + walletExpiringDate: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: cdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: CDB + listKind: CDBList + plural: cdbs + singular: cdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: 
+ properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + ordsImage: + type: string + ordsImagePullPolicy: + enum: + - Always + - Never + type: string + ordsImagePullSecret: + type: string + ordsPort: + type: integer + ordsPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + 
properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + 
required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + ordsImage: + type: string + ordsImagePullPolicy: + enum: + - Always + - Never + type: string + ordsImagePullSecret: + type: string + ordsPort: + type: integer + ordsPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: 
databaseobservers.observability.oracle.com +spec: + group: observability.oracle.com + names: + kind: DatabaseObserver + listKind: DatabaseObserverList + plural: databaseobservers + shortNames: + - dbobserver + - dbobservers + singular: databaseobserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: 
int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + 
additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: 
^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + 
type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + 
properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + 
downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - 
path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + 
type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + 
type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: 
string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + 
type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + 
- type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + 
type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: 
boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + 
vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string 
+ required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: 
string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + 
- TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + 
properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + 
targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: 
string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + 
storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: 
object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: 
object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + 
type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: 
object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + 
port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: 
^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + 
required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + 
type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + 
enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: 
^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + 
regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: 
+ type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + 
type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + 
type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + 
x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + 
volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + 
command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: 
+ host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + 
type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + 
runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + 
type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: dataguardbrokers.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DataguardBroker + listKind: DataguardBrokerList + plural: dataguardbrokers + singular: dataguardbroker + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + 
- jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + fastStartFailover: + type: boolean + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + properties: + clusterConnectString: + type: string + databasesInDataguardConfig: + additionalProperties: + type: string + type: object + externalConnectString: + type: string + fastStartFailover: + type: string + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + - jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + 
name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + fastStartFailover: + type: boolean + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + properties: + clusterConnectString: + type: string + databasesInDataguardConfig: + additionalProperties: + type: string + type: object + externalConnectString: + type: string + fastStartFailover: + type: string + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: dbcssystems.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DbcsSystem + listKind: DbcsSystemList + plural: dbcssystems + singular: dbcssystem + scope: Namespaced + versions: + - name: v1alpha1 + 
schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPaswordSecret: + type: string + dbBackupConfig: + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: 
string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPaswordSecret + - hostName + - shape + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: 
+ type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + required: + - freeformTags + - pdbName + - shouldPdbAdminAccountBeLocked + type: object + type: array + type: object + type: array + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPasswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + 
type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPasswordSecret: + type: string + dbBackupConfig: + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPasswordSecret + - hostName + - shape + - subnetId + type: 
object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + 
type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + type: object + type: array + type: object + type: array + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrests.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LREST + listKind: LRESTList + plural: lrests + singular: lrest + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the LREST + jsonPath: .spec.cdbName + name: CDB NAME + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the LREST 
Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message if any + jsonPath: .status.msg + name: Message + type: string + - description: string of the tnsalias + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + lrestImage: + type: string + lrestImagePullPolicy: + enum: + - Always + - Never + type: string + lrestImagePullSecret: + type: string + lrestPort: + type: integer + lrestPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - 
secretName + type: object + required: + - secret + type: object + nodeSelector: + additionalProperties: + type: string + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrpdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LRPDB + listKind: LRPDBList + plural: lrpdbs + singular: lrpdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the LRPDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: last sqlcode + jsonPath: .status.sqlCode + 
name: last sqlcode + type: integer + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + - Alter + - Noaction + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbPass: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + alterSystem: + type: string + alterSystemParameter: + type: string + alterSystemValue: + type: string + asClone: + type: boolean + assertiveLrpdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + lrpdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: 
object + required: + - secret + type: object + lrpdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + lrpdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + parameterScope: + type: string + pdbName: + type: string + pdbState: + enum: + - OPEN + - CLOSE + - ALTER + type: string + pdbconfigmap: + type: string + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + - alterSystemParameter + - alterSystemValue + - webServerPwd + type: object + status: + properties: + action: + type: string + alterSystem: + type: string + 
bitstat: + type: integer + bitstatstr: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + sqlCode: + type: integer + status: + type: boolean + totalSize: + type: string + required: + - phase + - sqlCode + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: oraclerestdataservices.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OracleRestDataService + listKind: OracleRestDataServiceList + plural: oraclerestdataservices + singular: oraclerestdataservice + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + - jsonPath: .status.mongoDbApiAccessUrl + name: MongoDbApi Access URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + properties: + 
keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + readinessCheckPeriod: + type: integer + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + - jsonPath: .status.mongoDbApiAccessUrl 
+ name: MongoDbApi Access URL + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + readinessCheckPeriod: + type: integer + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + 
required: + - pullFrom + type: object + loadBalancer: + type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: ordssrvs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OrdsSrvs + listKind: OrdsSrvsList + plural: ordssrvs + singular: ordssrvs + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: status + type: string + - jsonPath: .status.workloadType + name: workloadType + type: string + - jsonPath: .status.ordsVersion + name: ordsVersion + type: string + - jsonPath: .status.httpPort + name: httpPort + type: integer + - jsonPath: .status.httpsPort + name: httpsPort + type: integer + - jsonPath: .status.mongoPort + name: MongoPort + type: integer + - jsonPath: .status.restartRequired + name: restartRequired + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.ordsInstalled + name: OrdsInstalled + type: boolean + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + encPrivKey: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + forceRestart: + type: boolean + globalSettings: + properties: + cache.metadata.enabled: + type: boolean + cache.metadata.graphql.expireAfterAccess: + format: int64 + type: integer + cache.metadata.graphql.expireAfterWrite: + format: int64 + type: integer + cache.metadata.jwks.enabled: + type: boolean + cache.metadata.jwks.expireAfterAccess: + format: int64 + 
type: integer + cache.metadata.jwks.expireAfterWrite: + format: int64 + type: integer + cache.metadata.jwks.initialCapacity: + format: int32 + type: integer + cache.metadata.jwks.maximumSize: + format: int32 + type: integer + cache.metadata.timeout: + format: int64 + type: integer + certSecret: + properties: + cert: + type: string + key: + type: string + secretName: + type: string + required: + - cert + - key + - secretName + type: object + database.api.enabled: + type: boolean + database.api.management.services.disabled: + type: boolean + db.invalidPoolTimeout: + format: int64 + type: integer + debug.printDebugToScreen: + type: boolean + enable.mongo.access.log: + default: false + type: boolean + enable.standalone.access.log: + default: false + type: boolean + error.responseFormat: + type: string + feature.grahpql.max.nesting.depth: + format: int32 + type: integer + icap.port: + format: int32 + type: integer + icap.secure.port: + format: int32 + type: integer + icap.server: + type: string + log.procedure: + type: boolean + mongo.enabled: + type: boolean + mongo.idle.timeout: + format: int64 + type: integer + mongo.op.timeout: + format: int64 + type: integer + mongo.port: + default: 27017 + format: int32 + type: integer + request.traceHeaderName: + type: string + security.credentials.attempts: + format: int32 + type: integer + security.credentials.lock.time: + format: int64 + type: integer + security.disableDefaultExclusionList: + type: boolean + security.exclusionList: + type: string + security.externalSessionTrustedOrigins: + type: string + security.forceHTTPS: + type: boolean + security.httpsHeaderCheck: + type: string + security.inclusionList: + type: string + security.maxEntries: + format: int32 + type: integer + security.verifySSL: + type: boolean + standalone.context.path: + default: /ords + type: string + standalone.http.port: + default: 8080 + format: int32 + type: integer + standalone.https.host: + type: string + standalone.https.port: + default: 8443 + 
format: int32 + type: integer + standalone.stop.timeout: + format: int64 + type: integer + type: object + image: + type: string + imagePullPolicy: + default: IfNotPresent + enum: + - IfNotPresent + - Always + - Never + type: string + imagePullSecrets: + type: string + poolSettings: + items: + properties: + apex.security.administrator.roles: + type: string + apex.security.user.roles: + type: string + autoUpgradeAPEX: + default: false + type: boolean + autoUpgradeORDS: + default: false + type: boolean + db.adminUser: + type: string + db.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.cdb.adminUser: + type: string + db.cdb.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.connectionType: + enum: + - basic + - tns + - customurl + type: string + db.credentialsSource: + enum: + - pool + - request + type: string + db.customURL: + type: string + db.hostname: + type: string + db.poolDestroyTimeout: + format: int64 + type: integer + db.port: + format: int32 + type: integer + db.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.servicename: + type: string + db.sid: + type: string + db.tnsAliasName: + type: string + db.username: + default: ORDS_PUBLIC_USER + type: string + db.wallet.zip.service: + type: string + dbWalletSecret: + properties: + secretName: + type: string + walletName: + type: string + required: + - secretName + - walletName + type: object + debug.trackResources: + type: boolean + feature.openservicebroker.exclude: + type: boolean + feature.sdw: + type: boolean + http.cookie.filter: + type: string + jdbc.DriverType: + enum: + - thin + - oci8 + type: string + jdbc.InactivityTimeout: + format: int32 + type: integer + jdbc.InitialLimit: + format: int32 + 
type: integer + jdbc.MaxConnectionReuseCount: + format: int32 + type: integer + jdbc.MaxConnectionReuseTime: + format: int32 + type: integer + jdbc.MaxLimit: + format: int32 + type: integer + jdbc.MaxStatementsLimit: + format: int32 + type: integer + jdbc.MinLimit: + format: int32 + type: integer + jdbc.SecondsToTrustIdleConnection: + format: int32 + type: integer + jdbc.auth.admin.role: + type: string + jdbc.auth.enabled: + type: boolean + jdbc.cleanup.mode: + type: string + jdbc.statementTimeout: + format: int32 + type: integer + misc.defaultPage: + type: string + misc.pagination.maxRows: + format: int32 + type: integer + owa.trace.sql: + type: boolean + plsql.gateway.mode: + enum: + - disabled + - direct + - proxied + type: string + poolName: + type: string + procedure.preProcess: + type: string + procedure.rest.preHook: + type: string + procedurePostProcess: + type: string + restEnabledSql.active: + type: boolean + security.jwks.connection.timeout: + format: int64 + type: integer + security.jwks.read.timeout: + format: int64 + type: integer + security.jwks.refresh.interval: + format: int64 + type: integer + security.jwks.size: + format: int32 + type: integer + security.jwt.allowed.age: + format: int64 + type: integer + security.jwt.allowed.skew: + format: int64 + type: integer + security.jwt.profile.enabled: + type: boolean + security.requestAuthenticationFunction: + type: string + security.requestValidationFunction: + default: ords_util.authorize_plsql_gateway + type: string + security.validationFunctionType: + enum: + - plsql + - javascript + type: string + soda.defaultLimit: + type: string + soda.maxLimit: + type: string + tnsAdminSecret: + properties: + secretName: + type: string + required: + - secretName + type: object + required: + - db.secret + - poolName + type: object + type: array + replicas: + default: 1 + format: int32 + minimum: 1 + type: integer + workloadType: + default: Deployment + enum: + - Deployment + - StatefulSet + - DaemonSet + type: 
string + required: + - globalSettings + - image + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + httpPort: + format: int32 + type: integer + httpsPort: + format: int32 + type: integer + mongoPort: + format: int32 + type: integer + ordsInstalled: + type: boolean + ordsVersion: + type: string + restartRequired: + type: boolean + status: + type: string + workloadType: + type: string + required: + - restartRequired + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: pdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: PDB + listKind: PDBList + plural: pdbs + singular: pdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: 
Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + type: boolean + assertivePdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + type: string + pdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbState: + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - 
secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + type: object + status: + properties: + action: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + status: + type: boolean + totalSize: + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - 
description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + type: boolean + assertivePdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + type: string + pdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: 
object + pdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbState: + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + type: object + status: + 
properties: + action: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + status: + type: boolean + totalSize: + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: shardingdatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: ShardingDatabase + listKind: ShardingDatabaseList + plural: shardingdatabases + singular: shardingdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.gsm.state + name: Gsm State + type: string + - jsonPath: .status.gsm.services + name: Services + type: string + - jsonPath: .status.gsm.shards + name: shards + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + InvitedNodeSubnet: + type: string + catalog: + items: + properties: + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + 
x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + dbEdition: + type: string + dbImage: + type: string + dbImagePullSecret: + type: string + dbSecret: + properties: + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string + required: + - name + - pwdFileName + type: object + fssStorageClass: + type: string + gsm: + items: + properties: + directorName: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + region: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer 
+ - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + gsmDevMode: + type: string + gsmImage: + type: string + gsmImagePullSecret: + type: string + gsmService: + items: + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string + pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: string + required: + - name + type: object + type: array + gsmShardSpace: + items: + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: + type: string + 
isClone: + type: boolean + isDataGuard: + type: boolean + isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: + type: string + liveinessCheckPeriod: + type: integer + portMappings: + items: + properties: + port: + format: int32 + type: integer + protocol: + type: string + targetPort: + format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + items: + properties: + deployAs: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + shardGroup: + type: string + shardRegion: + type: 
string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + topicId: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + properties: + catalogs: + additionalProperties: + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: + properties: + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string + type: object + state: + type: string + type: object + shards: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.gsm.state + name: Gsm State + type: string + - jsonPath: .status.gsm.services + name: Services + type: string + - jsonPath: .status.gsm.shards + name: 
shards + priority: 1 + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + InvitedNodeSubnet: + type: string + catalog: + items: + properties: + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + dbEdition: + type: string + dbImage: + type: string + dbImagePullSecret: + type: string + dbSecret: + properties: + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string 
+ required: + - name + - pwdFileName + type: object + fssStorageClass: + type: string + gsm: + items: + properties: + directorName: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + region: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + gsmDevMode: + type: string + gsmImage: + type: string + gsmImagePullSecret: + type: string + gsmService: + items: + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: 
string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string + pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: string + required: + - name + type: object + type: array + gsmShardSpace: + items: + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: + type: string + isClone: + type: boolean + isDataGuard: + type: boolean + isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: + type: string + liveinessCheckPeriod: + type: integer + portMappings: + items: + properties: + port: + format: int32 + type: integer + protocol: + type: string + targetPort: + format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + items: + properties: + deployAs: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + 
additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + topicId: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + properties: + catalogs: + additionalProperties: + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: 
^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: + properties: + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string + type: object + state: + type: string + type: object + shards: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: singleinstancedatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: SingleInstanceDatabase + listKind: SingleInstanceDatabaseList + plural: singleinstancedatabases + singular: singleinstancedatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.edition + name: Edition + type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.role + name: Role + type: string + - jsonPath: .status.releaseUpdate + name: Version + type: string + - jsonPath: .status.connectString + name: Connect Str + type: string + - jsonPath: .status.pdbConnectString + name: Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.oemExpressUrl + name: Oem 
Express Url + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + archiveLog: + type: boolean + charset: + type: string + convertToSnapshotStandby: + type: boolean + createAs: + enum: + - primary + - standby + - clone + - truecache + type: string + edition: + enum: + - standard + - enterprise + - express + - free + type: string + enableTCPS: + type: boolean + flashBack: + type: boolean + forceLog: + type: boolean + image: + properties: + prebuiltDB: + type: boolean + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + listenerPort: + type: integer + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + primaryDatabaseRef: + type: string + readinessCheckPeriod: + type: integer + replicas: + type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + sid: + 
maxLength: 12 + pattern: ^[a-zA-Z0-9]+$ + type: string + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + trueCacheServices: + items: + type: string + type: array + required: + - image + type: object + status: + properties: + apexInstalled: + type: boolean + archiveLog: + type: string + certCreationTimestamp: + type: string + certRenewInterval: + type: string + charset: + type: string + clientWalletLoc: + type: string + clusterConnectString: + type: string + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectString: + type: string + convertToSnapshotStandby: + type: boolean + createdAs: + type: string + datafilesCreated: + default: "false" + type: string + datafilesPatched: + default: "false" + type: string + dgBroker: + type: string + edition: + type: string + flashBack: + type: string + forceLog: + type: string + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + initPgaSize: + type: integer + initSgaSize: + type: integer + isTcpsEnabled: + default: false + type: boolean + nodes: + items: + type: string + type: array + oemExpressUrl: + type: string + ordsReference: + type: string + pdbConnectString: + type: string + pdbName: 
+ type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + prebuiltDB: + type: boolean + primaryDatabase: + type: string + releaseUpdate: + type: string + replicas: + type: integer + role: + type: string + sid: + type: string + standbyDatabases: + additionalProperties: + type: string + type: object + status: + type: string + tcpsConnectString: + type: string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: "" + type: string + required: + - isTcpsEnabled + - persistence + - tcpsTlsSecret + type: object + type: object + served: true + storage: false + subresources: + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + - additionalPrinterColumns: + - jsonPath: .status.edition + name: Edition + type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.role + name: Role + type: string + - jsonPath: .status.releaseUpdate + name: Version + type: string + - jsonPath: .status.connectString + name: Connect Str + type: string + - jsonPath: .status.pdbConnectString + name: Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.oemExpressUrl + name: Oem Express Url + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + 
secretName: + type: string + required: + - secretName + type: object + archiveLog: + type: boolean + charset: + type: string + convertToSnapshotStandby: + type: boolean + createAs: + enum: + - primary + - standby + - clone + - truecache + type: string + edition: + enum: + - standard + - enterprise + - express + - free + type: string + enableTCPS: + type: boolean + flashBack: + type: boolean + forceLog: + type: boolean + image: + properties: + prebuiltDB: + type: boolean + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + listenerPort: + type: integer + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + primaryDatabaseRef: + type: string + readinessCheckPeriod: + type: integer + replicas: + type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + sid: + maxLength: 12 + pattern: ^[a-zA-Z0-9]+$ + type: string + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + trueCacheServices: + items: + type: string + type: array + required: + - image + type: object + status: + properties: + apexInstalled: + type: 
boolean + archiveLog: + type: string + certCreationTimestamp: + type: string + certRenewInterval: + type: string + charset: + type: string + clientWalletLoc: + type: string + clusterConnectString: + type: string + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectString: + type: string + convertToSnapshotStandby: + type: boolean + createdAs: + type: string + datafilesCreated: + default: "false" + type: string + datafilesPatched: + default: "false" + type: string + dgBroker: + type: string + edition: + type: string + flashBack: + type: string + forceLog: + type: string + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + initPgaSize: + type: integer + initSgaSize: + type: integer + isTcpsEnabled: + default: false + type: boolean + nodes: + items: + type: string + type: array + oemExpressUrl: + type: string + ordsReference: + type: string + pdbConnectString: + type: string + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: 
+ type: string + type: object + prebuiltDB: + type: boolean + primaryDatabase: + type: string + releaseUpdate: + type: string + replicas: + type: integer + role: + type: string + sid: + type: string + standbyDatabases: + additionalProperties: + type: string + type: object + status: + type: string + tcpsConnectString: + type: string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: "" + type: string + required: + - isTcpsEnabled + - persistence + - tcpsTlsSecret + type: object + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: oracle-database-operator-leader-election-role namespace: oracle-database-operator-system rules: - apiGroups: @@ -911,14 +13720,23 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: oracle-database-operator-manager-role rules: - apiGroups: - "" resources: - configmaps + - containers + - deployments + - events + - namespaces + - persistentvolumeclaims + - pods + - pods/exec + - pods/log + - replicasets - secrets + - services verbs: - create - delete @@ -930,21 +13748,29 @@ rules: - apiGroups: - "" resources: - - events - - nodes - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - services + - configmaps/status + - daemonsets/status + - deployments/status + - services/status + - statefulsets/status verbs: - - create - - delete - get - - list - patch - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list - watch +- apiGroups: + - "" + resources: + - secrets/status + verbs: + - get - apiGroups: - '''''' resources: @@ -960,6 +13786,18 @@ rules: - apiGroups: - apps resources: + - configmaps + verbs: + - get + - list +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - pods + - replicasets + - services - statefulsets verbs: - 
create @@ -979,18 +13817,21 @@ rules: - list - update - apiGroups: - - "" + - database.oracle.com resources: - - configmaps + - autonomouscontainerdatabases + - autonomousdatabases + - cdbs + - dataguardbrokers + - dbcssystems - events - - namespaces - - nodes - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - secrets - - services + - lrests + - lrpdbs + - oraclerestdataservices + - ordssrvs + - pdbs + - shardingdatabases + - singleinstancedatabases verbs: - create - delete @@ -1000,21 +13841,35 @@ rules: - update - watch - apiGroups: - - "" + - database.oracle.com resources: - - pods/exec + - autonomouscontainerdatabases/status + - autonomousdatabasebackups/status + - autonomousdatabaserestores/status + - cdbs/status + - dataguardbrokers/status + - dbcssystems/status + - lrests/status + - lrpdbs/status + - oraclerestdataservices/status + - ordssrvs/status + - pdbs/status + - shardingdatabases/status + - singleinstancedatabases/status verbs: - - create + - get + - patch + - update - apiGroups: - database.oracle.com resources: - - autonomousdatabases + - autonomousdatabasebackups + - autonomousdatabaserestores verbs: - create - delete - get - list - - patch - update - watch - apiGroups: @@ -1027,18 +13882,20 @@ rules: - apiGroups: - database.oracle.com resources: - - shardingdatabases + - cdbs/finalizers + - dataguardbrokers/finalizers + - lrests/finalizers + - oraclerestdataservices/finalizers + - ordssrvs/finalizers + - singleinstancedatabases/finalizers verbs: - - create - - delete - - get - - list - - patch - update - - watch - apiGroups: - database.oracle.com resources: + - dbcssystems/finalizers + - lrpdbs/finalizers + - pdbs/finalizers - shardingdatabases/finalizers verbs: - create @@ -1047,17 +13904,21 @@ rules: - patch - update - apiGroups: - - database.oracle.com + - monitoring.coreos.com resources: - - shardingdatabases/status + - servicemonitors verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - 
database.oracle.com + - observability.oracle.com resources: - - singleinstancedatabases + - databaseobservers verbs: - create - delete @@ -1067,19 +13928,27 @@ rules: - update - watch - apiGroups: - - database.oracle.com + - observability.oracle.com resources: - - singleinstancedatabases/finalizers + - databaseobservers/finalizers verbs: - update - apiGroups: - - database.oracle.com + - observability.oracle.com resources: - - singleinstancedatabases/status + - databaseobservers/status verbs: - get - patch - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -1124,9 +13993,10 @@ subjects: namespace: oracle-database-operator-system --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: name: oracle-database-operator-oracle-database-operator-manager-rolebinding + namespace: oracle-database-operator-system roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -1139,11 +14009,11 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: oracle-database-operator-oracle-database-operator-proxy-rolebinding + name: oracle-database-operator-proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: proxy-role + name: oracle-database-operator-oracle-database-operator-proxy-role subjects: - kind: ServiceAccount name: default @@ -1164,18 +14034,693 @@ spec: selector: control-plane: controller-manager --- -apiVersion: v1 -kind: Service +apiVersion: v1 +kind: Service +metadata: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: oracle-database-operator-serving-cert + namespace: oracle-database-operator-system 
+spec: + dnsNames: + - oracle-database-operator-webhook-service.oracle-database-operator-system.svc + - oracle-database-operator-webhook-service.oracle-database-operator-system.svc.cluster.local + issuerRef: + kind: Issuer + name: oracle-database-operator-selfsigned-issuer + secretName: webhook-server-cert +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: oracle-database-operator-selfsigned-issuer + namespace: oracle-database-operator-system +spec: + selfSigned: {} +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + name: oracle-database-operator-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-autonomousdatabasebackup + failurePolicy: Fail + name: mautonomousdatabasebackupv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: mcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-dbcssystem + failurePolicy: Fail + name: mdbcssystemv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + 
resources: + - dbcssystems + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-lrest + failurePolicy: Fail + name: mlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-lrpdb + failurePolicy: Fail + name: mlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrpdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: mpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-shardingdatabase + failurePolicy: Fail + name: mshardingdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup + failurePolicy: Fail + name: mautonomousdatabasebackupv1alpha1.kb.io + rules: + - 
apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v1alpha1-dataguardbroker + failurePolicy: Fail + name: mdataguardbroker.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - dataguardbrokers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-dbcssystem + failurePolicy: Fail + name: mdbcssystemv1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - dbcssystems + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v1alpha1-oraclerestdataservice + failurePolicy: Fail + name: moraclerestdataservice.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - oraclerestdataservices + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: mshardingdatabasev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + 
clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v1alpha1-singleinstancedatabase + failurePolicy: Fail + name: msingleinstancedatabase.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - singleinstancedatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-observability-oracle-com-v4-databaseobserver + failurePolicy: Fail + name: mdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration metadata: - name: oracle-database-operator-webhook-service - namespace: oracle-database-operator-system -spec: - ports: - - port: 443 - protocol: TCP - targetPort: 9443 - selector: - control-plane: controller-manager + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + name: oracle-database-operator-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-autonomouscontainerdatabase + failurePolicy: Fail + name: vautonomouscontainerdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomouscontainerdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: 
/validate-database-oracle-com-v4-autonomousdatabasebackup + failurePolicy: Fail + name: vautonomousdatabasebackupv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-autonomousdatabaserestore + failurePolicy: Fail + name: vautonomousdatabaserestorev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabaserestores + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: vcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-lrest + failurePolicy: Fail + name: vlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-lrpdb + failurePolicy: Fail + name: vlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrpdbs + sideEffects: None +- admissionReviewVersions: + - 
v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: vpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-shardingdatabase + failurePolicy: Fail + name: vshardingdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase + failurePolicy: Fail + name: vautonomouscontainerdatabasev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomouscontainerdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-autonomousdatabasebackup + failurePolicy: Fail + name: vautonomousdatabasebackupv1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-autonomousdatabaserestore + 
failurePolicy: Fail + name: vautonomousdatabaserestorev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabaserestores + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-autonomousdatabase + failurePolicy: Fail + name: vautonomousdatabasev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-dataguardbroker + failurePolicy: Fail + name: vdataguardbroker.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - dataguardbrokers + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-oraclerestdataservice + failurePolicy: Fail + name: voraclerestdataservice.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - oraclerestdataservices + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: vshardingdatabasev1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - 
CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-singleinstancedatabase + failurePolicy: Fail + name: vsingleinstancedatabase.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - singleinstancedatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-observability-oracle-com-v4-databaseobserver + failurePolicy: Fail + name: vdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None --- apiVersion: apps/v1 kind: Deployment @@ -1199,7 +14744,10 @@ spec: - --enable-leader-election command: - /manager - image: container-registry.oracle.com/database/operator:0.1.0 + env: + - name: WATCH_NAMESPACE + value: "" + image: container-registry.oracle.com/database/operator:1.2.0 imagePullPolicy: Always name: manager ports: @@ -1217,8 +14765,6 @@ spec: - mountPath: /tmp/k8s-webhook-server/serving-certs name: cert readOnly: true - imagePullSecrets: - - name: container-registry-secret terminationGracePeriodSeconds: 10 volumes: - name: cert @@ -1226,83 +14772,3 @@ spec: defaultMode: 420 secretName: webhook-server-cert --- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: oracle-database-operator-serving-cert - namespace: oracle-database-operator-system -spec: - dnsNames: - - oracle-database-operator-webhook-service.oracle-database-operator-system.svc - - oracle-database-operator-webhook-service.oracle-database-operator-system.svc.cluster.local - 
issuerRef: - kind: Issuer - name: oracle-database-operator-selfsigned-issuer - secretName: webhook-server-cert ---- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: oracle-database-operator-selfsigned-issuer - namespace: oracle-database-operator-system -spec: - selfSigned: {} ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - annotations: - cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert - name: oracle-database-operator-mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: oracle-database-operator-webhook-service - namespace: oracle-database-operator-system - path: /mutate-database-oracle-com-v1alpha1-singleinstancedatabase - failurePolicy: Fail - name: msingleinstancedatabase.kb.io - rules: - - apiGroups: - - database.oracle.com - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - singleinstancedatabases - sideEffects: None ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert - name: oracle-database-operator-validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: oracle-database-operator-webhook-service - namespace: oracle-database-operator-system - path: /validate-database-oracle-com-v1alpha1-singleinstancedatabase - failurePolicy: Fail - name: vsingleinstancedatabase.kb.io - rules: - - apiGroups: - - database.oracle.com - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - - DELETE - resources: - - singleinstancedatabases - sideEffects: None diff --git a/ords/Dockerfile b/ords/Dockerfile new file mode 100644 index 00000000..25ba08ec --- /dev/null +++ b/ords/Dockerfile @@ -0,0 +1,86 @@ +## Copyright (c) 2022 
Oracle and/or its affiliates. +## +## The Universal Permissive License (UPL), Version 1.0 +## +## Subject to the condition set forth below, permission is hereby granted to any +## person obtaining a copy of this software, associated documentation and/or data +## (collectively the "Software"), free of charge and under any and all copyright +## rights in the Software, and any and all patent rights owned or freely +## licensable by each licensor hereunder covering either (i) the unmodified +## Software as contributed to or provided by such licensor, or (ii) the Larger +## Works (as defined below), to deal in both +## +## (a) the Software, and +## (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +## one is included with the Software (each a "Larger Work" to which the Software +## is contributed by such licensors), +## +## without restriction, including without limitation the rights to copy, create +## derivative works of, display, perform, and distribute the Software and make, +## use, sell, offer for sale, import, export, have made, and have sold the +## Software and the Larger Work(s), and to sublicense the foregoing rights on +## either these or other terms. +## +## This license is subject to the following condition: +## The above copyright notice and either this complete permission notice or at +## a minimum a reference to the UPL must be included in all copies or +## substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +## SOFTWARE. 
+ +FROM container-registry.oracle.com/java/jdk:latest + +# Environment variables required for this build (do NOT change) +# ------------------------------------------------------------- +ENV ORDS_HOME=/opt/oracle/ords/ \ + RUN_FILE="runOrdsSSL.sh" \ + ORDSVERSION=23.4.0-8 \ + JAVA=17 +#see https://www.oracle.com/tools/ords/ords-relnotes-23.4.0.html + +# Copy binaries +# ------------- +COPY $RUN_FILE $ORDS_HOME + +RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && \ + yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && \ + yum -y install java-$JAVA-openjdk-devel && \ + yum -y install iproute && \ + yum clean all + +RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + +RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + +# Setup filesystem and oracle user +# -------------------------------- +RUN mkdir -p $ORDS_HOME/doc_root && \ + mkdir -p $ORDS_HOME/error && \ + mkdir -p $ORDS_HOME/secrets && \ + chmod ug+x $ORDS_HOME/*.sh && \ + groupadd -g 54322 dba && \ + usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && \ + chown -R oracle:dba $ORDS_HOME +# echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +RUN echo "unset R1" >> /home/oracle/.bashrc && \ + chown root:root /home/oracle/.bashrc && chmod +r /home/oracle/.bashrc + +# Finalize setup +# ------------------- +USER oracle +WORKDIR /home/oracle + + +VOLUME ["$ORDS_HOME/config/ords"] +EXPOSE 8888 + +# Define default command to start Ords Services +CMD $ORDS_HOME/$RUN_FILE + diff --git a/ords/ords_init.sh b/ords/ords_init.sh new file mode 100644 index 00000000..0994dceb --- /dev/null +++ b/ords/ords_init.sh @@ -0,0 +1,484 @@ +#!/bin/bash +## Copyright (c) 2006, 2024, Oracle and/or its affiliates. 
+## +## The Universal Permissive License (UPL), Version 1.0 +## +## Subject to the condition set forth below, permission is hereby granted to any +## person obtaining a copy of this software, associated documentation and/or data +## (collectively the "Software"), free of charge and under any and all copyright +## rights in the Software, and any and all patent rights owned or freely +## licensable by each licensor hereunder covering either (i) the unmodified +## Software as contributed to or provided by such licensor, or (ii) the Larger +## Works (as defined below), to deal in both +## +## (a) the Software, and +## (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +## one is included with the Software (each a "Larger Work" to which the Software +## is contributed by such licensors), +## +## without restriction, including without limitation the rights to copy, create +## derivative works of, display, perform, and distribute the Software and make, +## use, sell, offer for sale, import, export, have made, and have sold the +## Software and the Larger Work(s), and to sublicense the foregoing rights on +## either these or other terms. +## +## This license is subject to the following condition: +## The above copyright notice and either this complete permission notice or at +## a minimum a reference to the UPL must be included in all copies or +## substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +## SOFTWARE. 
+ +dump_stack(){ +_log_date=`date "+%y:%m:%d %H:%M:%S"` + local frame=0 + local line_no + local function_name + local file_name + echo -e "BACKTRACE [${_log_date}]\n" + echo -e "filename:line\tfunction " + echo -e "------------- --------" + while caller $frame ;do ((frame++)) ;done | \ + while read line_no function_name file_name;\ + do echo -e "$file_name:$line_no\t$function_name" ;done >&2 +} + + + +get_conn_string() { + local -n _conn_string="${1}" + + local -r _admin_user=$($ords_cfg_cmd get --secret db.adminUser | tail -1) + local _conn_type=$($ords_cfg_cmd get db.connectionType |tail -1) + if [[ $_conn_type == "customurl" ]]; then + local -r _conn=$($ords_cfg_cmd get db.customURL | tail -1) + elif [[ $_conn_type == "tns" ]]; then + local -r _tns_service=$($ords_cfg_cmd get db.tnsAliasName | tail -1) + local -r _conn=${_tns_service} + elif [[ $_conn_type == "basic" ]]; then + local -r _host=$($ords_cfg_cmd get db.hostname | tail -1) + local -r _port=$($ords_cfg_cmd get db.port | tail -1) + local -r _service=$($ords_cfg_cmd get db.servicename | tail -1) + local -r _sid=$($ords_cfg_cmd get db.sid | tail -1) + + if [[ -n ${_host} ]] && [[ -n ${_port} ]]; then + if [[ -n ${_service} ]] || [[ -n ${_sid} ]]; then + local -r _conn=${_host}:${_port}/${_service:-$_sid} + fi + fi + else + # wallet + _conn_type="wallet" + local -r _wallet_service=$($ords_cfg_cmd get db.wallet.zip.service | tail -1) + local -r _conn=${_wallet_service} + fi + + if [[ -n ${_conn} ]]; then + echo "Connection String (${_conn_type}): ${_conn}" + _conn_string="${_admin_user%%/ *}/${config["dbadminusersecret"]}@${_conn}" + if [[ ${_admin_user%%/ *} == "SYS" ]]; then + _conn_string="${_conn_string=} AS SYSDBA" + fi + fi +} + +#------------------------------------------------------------------------------ +function run_sql { + local -r _conn_string="${1}" + local -r _sql="${2}" + local -n _output="${3}" + local -i _rc=0 + + if [[ -z ${_sql} ]]; then + dump_stack + echo "FATAL: Dear Developer.. 
you've got a bug calling run_sql" && exit 1 + fi + ## Get TNS_ADMIN location + local -r _tns_admin=$($ords_cfg_cmd get db.tnsDirectory | tail -1) + if [[ ! $_tns_admin =~ "Cannot get setting" ]]; then + echo "Setting: TNS_ADMIN=${_tns_admin}" + export TNS_ADMIN=${_tns_admin} + fi + + ## Get ADB Wallet + local -r _wallet_zip_path=$($ords_cfg_cmd get db.wallet.zip.path | tail -1) + if [[ ! $_wallet_zip_path =~ "Cannot get setting" ]]; then + echo "Using: set cloudconfig ${_wallet_zip_path}" + local -r _cloudconfig="set cloudconfig ${_wallet_zip_path}" + fi + + # NOTE to maintainer; the heredoc must be TAB indented + echo "Running SQL..." + #_output=$(cd ${APEX_HOME}/${APEX_VER} && sql -S /nolog <<-EOSQL + _output=$(cd ${APEX_HOME}/${APEX_VER} && sql -S -nohistory -noupdates /nolog <<-EOSQL + WHENEVER SQLERROR EXIT 1 + WHENEVER OSERROR EXIT 1 + ${_cloudconfig} + connect $_conn_string + set serveroutput on echo off pause off feedback off + set heading off wrap off linesize 1000 pagesize 0 + SET TERMOUT OFF VERIFY OFF + ${_sql} + exit; + EOSQL + ) + _rc=$? + + if (( ${_rc} > 0 )); then + dump_stack + echo "SQLERROR: ${_output}" + fi + + return $_rc +} + +#------------------------------------------------------------------------------ +function check_adb() { + local -r _conn_string=$1 + local -n _is_adb=$2 + + local -r _adb_chk_sql=" + DECLARE + invalid_column exception; + pragma exception_init (invalid_column,-00904); + adb_check integer; + BEGIN + EXECUTE IMMEDIATE q'[SELECT COUNT(*) FROM ( + SELECT JSON_VALUE(cloud_identity, '\$.DATABASE_OCID') AS database_ocid + FROM v\$pdbs) t + WHERE t.database_ocid like '%AUTONOMOUS%']' INTO adb_check; + DBMS_OUTPUT.PUT_LINE(adb_check); + EXCEPTION WHEN invalid_column THEN + DBMS_OUTPUT.PUT_LINE('0'); + END; + /" + echo "Checking if Database is an ADB" + run_sql "${_conn_string}" "${_adb_chk_sql}" "_adb_check" + _rc=$? 
+ + if (( ${_rc} == 0 )); then + _adb_check=${_adb_check//[[:space:]]/} + echo "ADB Check: ${_adb_check}" + if (( ${_adb_check} == 1 )); then + _is_adb=${_adb_check//[[:space:]]/} + fi + fi + + return ${_rc} +} + +function create_adb_user() { + local -r _conn_string="${1}" + local -r _pool_name="${2}" + + local _config_user=$($ords_cfg_cmd get db.username | tail -1) + + if [[ -z ${_config_user} ]] || [[ ${_config_user} == "ORDS_PUBLIC_USER" ]]; then + echo "FATAL: You must specify a db.username <> ORDS_PUBLIC_USER in pool ${_pool_name}" + dump_stack + return 1 + fi + + local -r _adb_user_sql=" + DECLARE + l_user VARCHAR2(255); + l_cdn VARCHAR2(255); + BEGIN + BEGIN + SELECT USERNAME INTO l_user FROM DBA_USERS WHERE USERNAME='${_config_user}'; + EXECUTE IMMEDIATE 'ALTER USER \"${_config_user}\" PROFILE ORA_APP_PROFILE'; + EXECUTE IMMEDIATE 'ALTER USER \"${_config_user}\" IDENTIFIED BY \"${config["dbsecret"]}\"'; + DBMS_OUTPUT.PUT_LINE('${_config_user} Exists - Password reset'); + EXCEPTION + WHEN NO_DATA_FOUND THEN + EXECUTE IMMEDIATE 'CREATE USER \"${_config_user}\" IDENTIFIED BY \"${config["dbsecret"]}\" PROFILE ORA_APP_PROFILE'; + DBMS_OUTPUT.PUT_LINE('${_config_user} Created'); + END; + EXECUTE IMMEDIATE 'GRANT CONNECT TO \"${_config_user}\"'; + BEGIN + SELECT USERNAME INTO l_user FROM DBA_USERS WHERE USERNAME='ORDS_PLSQL_GATEWAY_OPER'; + EXECUTE IMMEDIATE 'ALTER USER \"ORDS_PLSQL_GATEWAY_OPER\" PROFILE DEFAULT'; + EXECUTE IMMEDIATE 'ALTER USER \"ORDS_PLSQL_GATEWAY_OPER\" NO AUTHENTICATION'; + DBMS_OUTPUT.PUT_LINE('ORDS_PLSQL_GATEWAY_OPER Exists'); + EXCEPTION + WHEN NO_DATA_FOUND THEN + EXECUTE IMMEDIATE 'CREATE USER \"ORDS_PLSQL_GATEWAY_OPER\" NO AUTHENTICATION PROFILE DEFAULT'; + DBMS_OUTPUT.PUT_LINE('ORDS_PLSQL_GATEWAY_OPER Created'); + END; + EXECUTE IMMEDIATE 'GRANT CONNECT TO \"ORDS_PLSQL_GATEWAY_OPER\"'; + EXECUTE IMMEDIATE 'ALTER USER \"ORDS_PLSQL_GATEWAY_OPER\" GRANT CONNECT THROUGH \"${_config_user}\"'; + ORDS_ADMIN.PROVISION_RUNTIME_ROLE ( + p_user 
=> '${_config_user}' + ,p_proxy_enabled_schemas => TRUE + ); + ORDS_ADMIN.CONFIG_PLSQL_GATEWAY ( + p_runtime_user => '${_config_user}' + ,p_plsql_gateway_user => 'ORDS_PLSQL_GATEWAY_OPER' + ); + -- TODO: Only do this if ADB APEX Version <> this ORDS Version + BEGIN + SELECT images_version INTO L_CDN + FROM APEX_PATCHES + where is_bundle_patch = 'Yes' + order by patch_version desc + fetch first 1 rows only; + EXCEPTION WHEN NO_DATA_FOUND THEN + select version_no INTO L_CDN + from APEX_RELEASE; + END; + apex_instance_admin.set_parameter( + p_parameter => 'IMAGE_PREFIX', + p_value => 'https://static.oracle.com/cdn/apex/'||L_CDN||'/' + ); + END; + /" + + run_sql "${_conn_string}" "${_adb_user_sql}" "_adb_user_sql_output" + _rc=$? + + echo "Installation Output: ${_adb_user_sql_output}" + return ${_rc} +} + +#------------------------------------------------------------------------------ +function compare_versions() { + local _db_ver=$1 + local _im_ver=$2 + + IFS='.' read -r -a _db_ver_array <<< "$_db_ver" + IFS='.' read -r -a _im_ver_array <<< "$_im_ver" + + # Compare each component + local i + for i in "${!_db_ver_array[@]}"; do + if [[ "${_db_ver_array[$i]}" -lt "${_im_ver_array[$i]}" ]]; then + # _db_ver < _im_ver (upgrade) + return 0 + elif [[ "${_db_ver_array[$i]}" -gt "${_im_ver_array[$i]}" ]]; then + # _db_ver < _im_ver (do nothing) + return 1 + fi + done + # _db_ver == __im_ver (do nothing) + return 1 +} + +#------------------------------------------------------------------------------ +set_secret() { + local -r _pool_name="${1}" + local -r _config_key="${2}" + local -r _config_val="${3}" + local -i _rc=0 + + if [[ -n "${_config_val}" ]]; then + ords --config "$ORDS_CONFIG" config --db-pool "${_pool_name}" secret --password-stdin "${_config_key}" <<< "${_config_val}" + _rc=$? 
+ echo "${_config_key} in pool ${_pool_name} set" + else + echo "${_config_key} in pool ${_pool_name}, not defined" + _rc=0 + fi + + return ${_rc} +} + +#------------------------------------------------------------------------------ +ords_upgrade() { + local -r _pool_name="${1}" + local -r _upgrade_key="${2}" + local -i _rc=0 + + if [[ -n "${config["dbadminusersecret"]}" ]]; then + # Get usernames + local -r ords_user=$($ords_cfg_cmd get db.username | tail -1) + local -r ords_admin=$($ords_cfg_cmd get db.adminUser | tail -1) + + echo "Performing ORDS install/upgrade as $ords_admin into $ords_user on pool ${_pool_name}" + if [[ ${_pool_name} == "default" ]]; then + ords --config "$ORDS_CONFIG" install --db-only \ + --admin-user "$ords_admin" --password-stdin <<< "${config["dbadminusersecret"]}" + _rc=$? + else + ords --config "$ORDS_CONFIG" install --db-pool "${_pool_name}" --db-only \ + --admin-user "$ords_admin" --password-stdin <<< "${config["dbadminusersecret"]}" + _rc=$? + fi + + # Dar be bugs below deck with --db-user so using the above + # ords --config "$ORDS_CONFIG" install --db-pool "$1" --db-only \ + # --admin-user "$ords_admin" --db-user "$ords_user" --password-stdin <<< "${!2}" + fi + + return $_rc +} + +#------------------------------------------------------------------------------ +function get_apex_version() { + local -r _conn_string="${1}" + local -n _action="${2}" + local -i _rc=0 + + local -r _ver_sql="SELECT VERSION FROM DBA_REGISTRY WHERE COMP_ID='APEX';" + run_sql "${_conn_string}" "${_ver_sql}" "_db_apex_version" + _rc=$? 
+ + if (( $_rc > 0 )); then + echo "FATAL: Unable to connect to ${_conn_string} to get APEX version" + dump_stack + return $_rc + fi + + local -r _db_apex_version=${_db_apex_version//[^0-9.]/} + echo "Database APEX Version: ${_db_apex_version:-Not Installed}" + + _action="none" + if [[ -z "${_db_apex_version}" ]]; then + echo "Installing APEX ${APEX_VER}" + _action="install" + elif compare_versions ${_db_apex_version} ${APEX_VER}; then + echo "Upgrading from ${_db_apex_version} to ${APEX_VER}" + _action="upgrade" + else + echo "No Installation/Upgrade Required" + fi + + return $_rc +} + +apex_upgrade() { + local -r _conn_string="${1}" + local -r _upgrade_key="${2}" + local -i _rc=0 + + if [[ -f ${APEX_HOME}/${APEX_VER}/apexins.sql ]] && [[ "${!_upgrade_key}" = "true" ]]; then + echo "Starting Installation of APEX ${APEX_VER}" + local -r _install_sql="@apxsilentins.sql SYSAUX SYSAUX TEMP /i/ ${config["dbsecret"]} ${config["dbsecret"]} ${config["dbsecret"]} ${config["dbsecret"]}" + run_sql "${_conn_string}" "${_install_sql}" "_install_output" + _rc=$? + echo "Installation Output: ${_install_output}" + fi + + return $_rc +} + +#------------------------------------------------------------------------------ +# INIT +#------------------------------------------------------------------------------ +declare -A pool_exit +for pool in "$ORDS_CONFIG"/databases/*; do + rc=0 + pool_name=$(basename "$pool") + pool_exit[${pool_name}]=0 + ords_cfg_cmd="ords --config $ORDS_CONFIG config --db-pool ${pool_name}" + echo "Found Pool: $pool_name..." 
+ + declare -A config + for key in dbsecret dbadminusersecret dbcdbadminusersecret; do + var_key="${pool_name//-/_}_${key}" + echo "Obtaining value from initContainer variable: ${var_key}" + var_val="${!var_key}" + config[${key}]="${var_val}" + done + + # Set Secrets + set_secret "${pool_name}" "db.password" "${config["dbsecret"]}" + rc=$((rc + $?)) + set_secret "${pool_name}" "db.adminUser.password" "${config["dbadminusersecret"]}" + rc=$((rc + $?)) + set_secret "${pool_name}" "db.cdb.adminUser.password" "${config["dbcdbadminusersecret"]}" + rc=$((rc + $?)) + + if (( ${rc} > 0 )); then + echo "FATAL: Unable to set configuration for pool ${pool_name}" + dump_stack + pool_exit[${pool_name}]=1 + continue + elif [[ -z ${config["dbsecret"]} ]]; then + echo "FATAL: db.password must be specified for ${pool_name}" + dump_stack + pool_exit[${pool_name}]=1 + continue + elif [[ -z ${config["dbadminusersecret"]} ]]; then + echo "INFO: No additional configuration for ${pool_name}" + continue + fi + + get_conn_string "conn_string" + if [[ -z ${conn_string} ]]; then + echo "FATAL: Unable to get ${pool_name} database connect string" + dump_stack + pool_exit[${pool_name}]=1 + continue + fi + + check_adb "${conn_string}" "is_adb" + rc=$? 
+ if (( ${rc} > 0 )); then + pool_exit[${pool_name}]=1 + continue + fi + + if (( is_adb )); then + # Create ORDS User + echo "Processing ADB in Pool: ${pool_name}" + create_adb_user "${conn_string}" "${pool_name}" + else + # APEX Upgrade + echo "---------------------------------------------------" + apex_upgrade_var=${pool_name}_autoupgrade_apex + if [[ ${!apex_upgrade_var} != "true" ]]; then + echo "APEX Install/Upgrade not requested for ${pool_name}" + continue + fi + + get_apex_version "${conn_string}" "action" + if [[ -z ${action} ]]; then + echo "FATAL: Unable to get ${pool_name} APEX Version" + dump_stack + pool_exit[${pool_name}]=1 + continue + fi + + if [[ ${action} != "none" ]]; then + apex_upgrade "${conn_string}" "${pool_name}_autoupgrade_apex" + if (( $? > 0 )); then + echo "FATAL: Unable to ${action} APEX for ${pool_name}" + dump_stack + pool_exit[${pool_name}]=1 + continue + fi + fi + + # ORDS Upgrade + ords_upgrade_var=${pool_name}_autoupgrade_ords + if [[ ${!ords_upgrade_var} != "true" ]]; then + echo "ORDS Install/Upgrade not requested for ${pool_name}" + continue + fi + + ords_upgrade "${pool_name}" "${pool_name}_autoupgrade_ords" + rc=$? + if (( $rc > 0 )); then + echo "FATAL: Unable to preform requested ORDS install/upgrade on ${pool_name}" + pool_exit[${pool_name}]=1 + dump_stack + continue + fi + fi +done + +for key in "${!pool_exit[@]}"; do + echo "Pool: $key, Exit Code: ${pool_exit[$key]}" + if (( ${pool_exit[$key]} > 0 )); then + rc=1 + fi +done + +exit $rc +#exit 0 diff --git a/ords/runOrdsSSL.sh b/ords/runOrdsSSL.sh new file mode 100644 index 00000000..07e2b931 --- /dev/null +++ b/ords/runOrdsSSL.sh @@ -0,0 +1,197 @@ +#!/bin/bash + +cat <$TNSNAME + + +function SetParameter() { + ##ords config info <--- Use this command to get the list + +[[ ! 
-z "${ORACLE_HOST}" && -z "${DBTNSURL}" ]] && { + $ORDS --config ${CONFIG} config set db.hostname ${ORACLE_HOST:-racnode1} + $ORDS --config ${CONFIG} config set db.port ${ORACLE_PORT:-1521} + $ORDS --config ${CONFIG} config set db.servicename ${ORACLE_SERVICE:-TESTORDS} +} + +[[ -z "${ORACLE_HOST}" && ! -z "${DBTNSURL}" ]] && { + #$ORDS --config ${CONFIG} config set db.tnsAliasName ${TNSALIAS} + #$ORDS --config ${CONFIG} config set db.tnsDirectory ${TNS_ADMIN} + #$ORDS --config ${CONFIG} config set db.connectionType tns + + $ORDS --config ${CONFIG} config set db.connectionType customurl + $ORDS --config ${CONFIG} config set db.customURL jdbc:oracle:thin:@${DBTNSURL} +} + + $ORDS --config ${CONFIG} config set security.requestValidationFunction false + $ORDS --config ${CONFIG} config set jdbc.MaxLimit 100 + $ORDS --config ${CONFIG} config set jdbc.InitialLimit 50 + $ORDS --config ${CONFIG} config set error.externalPath ${ERRORFOLDER} + $ORDS --config ${CONFIG} config set standalone.access.log /home/oracle + $ORDS --config ${CONFIG} config set standalone.https.port 8888 + $ORDS --config ${CONFIG} config set standalone.https.cert ${CERTIFICATE} + $ORDS --config ${CONFIG} config set standalone.https.cert.key ${KEY} + $ORDS --config ${CONFIG} config set restEnabledSql.active true + $ORDS --config ${CONFIG} config set security.verifySSL true + $ORDS --config ${CONFIG} config set database.api.enabled true + $ORDS --config ${CONFIG} config set plsql.gateway.mode disabled + $ORDS --config ${CONFIG} config set database.api.management.services.disabled false + $ORDS --config ${CONFIG} config set misc.pagination.maxRows 1000 + $ORDS --config ${CONFIG} config set db.cdb.adminUser "${CDBADMIN_USER:-C##DBAPI_CDB_ADMIN} AS SYSDBA" + $ORDS --config ${CONFIG} config secret --password-stdin db.cdb.adminUser.password << EOF +${CDBADMIN_PWD:-PROVIDE_A_PASSWORD} +EOF + +$ORDS --config ${CONFIG} config user add --password-stdin ${WEBSERVER_USER:-ordspdbadmin} "SQL Administrator, System 
Administrator" < $ORDS_HOME/k.txt + + +export ORDS_LOGS=/tmp + + [ -f $ORDS_HOME/secrets/$WEBSERVER_USER_KEY ] && + { + WEBSERVER_USER=$(cat /opt/oracle/ords/secrets/${WEBSERVER_USER_KEY}|base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) + } + + [ -f $ORDS_HOME/secrets/$WEBSERVER_PASSWORD_KEY ] && + { + WEBSERVER_PASSWORD=$(cat /opt/oracle/ords/secrets/${WEBSERVER_PASSWORD_KEY}|base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) + } + + [ -f $ORDS_HOME/secrets/$CDBADMIN_USER_KEY ] && + { + CDBADMIN_USER=$(cat /opt/oracle/ords/secrets/${CDBADMIN_USER_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) + } + + [ -f $ORDS_HOME/secrets/$CDBADMIN_PWD_KEY ] && + { + CDBADMIN_PWD=$(cat /opt/oracle/ords/secrets/${CDBADMIN_PWD_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) + } + + [ -f $ORDS_HOME/secrets/$ORACLE_PWD_KEY ] && + { + #SYSDBA_PASSWORD=`cat $ORDS_HOME/secrets/$ORACLE_PWD_KEY` + SYSDBA_PASSWORD=$(cat $ORDS_HOME/secrets/${ORACLE_PWD_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) + } + + [ -f $ORDS_HOME/secrets/$ORACLE_PWD_KEY ] && + { + #ORDS_PASSWORD=`cat $ORDS_HOME/secrets/$ORDS_PWD_KEY` + ORDS_PASSWORD=$(cat $ORDS_HOME/secrets/${ORDS_PWD_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) + } + + +SetParameter; +$ORDS --config ${CONFIG} install \ + --admin-user ${SYSDBA_USER:-"SYS AS SYSDBA"} \ + --feature-db-api true \ + --feature-rest-enabled-sql true \ + --log-folder ${ORDS_LOGS} \ + --proxy-user \ + --password-stdin <${CKF} 2>&1 +echo "checkfile" >> ${CKF} +NOT_INSTALLED=`cat ${CKF} | grep "INFO: The" |wc -l ` +echo NOT_INSTALLED=$NOT_INSTALLED + + +function StartUp () { + $ORDS --config $CONFIG serve --port 8888 --secure 
+} + +# Check whether ords is already setup +if [ $NOT_INSTALLED -ne 0 ] +then + echo " SETUP " + setupOrds; + StartUp; +fi + +if [ $NOT_INSTALLED -eq 0 ] +then + echo " STARTUP " + StartUp; +fi + + diff --git a/rbac/cluster-role-binding.yaml b/rbac/cluster-role-binding.yaml new file mode 100644 index 00000000..1c609012 --- /dev/null +++ b/rbac/cluster-role-binding.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding + namespace: oracle-database-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +--- diff --git a/rbac/default-ns-role-binding.yaml b/rbac/default-ns-role-binding.yaml new file mode 100644 index 00000000..b737e1f1 --- /dev/null +++ b/rbac/default-ns-role-binding.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +--- diff --git a/rbac/node-rbac.yaml b/rbac/node-rbac.yaml new file mode 100644 index 00000000..ac474873 --- /dev/null +++ b/rbac/node-rbac.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: oracle-database-operator-manager-role-node +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: oracle-database-operator-manager-role-node-cluster-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + 
name: oracle-database-operator-manager-role-node +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +--- diff --git a/rbac/persistent-volume-rbac.yaml b/rbac/persistent-volume-rbac.yaml new file mode 100644 index 00000000..bce9733d --- /dev/null +++ b/rbac/persistent-volume-rbac.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: oracle-database-operator-manager-role-persistent-volume +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: oracle-database-operator-manager-role-persistent-volume-cluster-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role-persistent-volume +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +--- diff --git a/rbac/storage-class-rbac.yaml b/rbac/storage-class-rbac.yaml new file mode 100644 index 00000000..a34f67d4 --- /dev/null +++ b/rbac/storage-class-rbac.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: oracle-database-operator-manager-role-storage-class +rules: +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: oracle-database-operator-manager-role-storage-class-cluster-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role-storage-class +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +--- diff --git a/set_ocicredentials.sh b/set_ocicredentials.sh index fe2e5a5c..dddf62c4 100755 --- a/set_ocicredentials.sh +++ b/set_ocicredentials.sh @@ -1,5 +1,5 @@ #!/bin/bash -# 
Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # Parse command line arguments @@ -53,7 +53,7 @@ EOF exit 0 ;; *) # unknown command - echo "Unknow command. Use [set_ocicredentials -h] for help." + echo "Unknown command. Use [set_ocicredentials -h] for help." exit 1 shift # past argument ;; diff --git a/test/e2e/autonomouscontainerdatabase_test.go b/test/e2e/autonomouscontainerdatabase_test.go new file mode 100644 index 00000000..a76fc33f --- /dev/null +++ b/test/e2e/autonomouscontainerdatabase_test.go @@ -0,0 +1,142 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package e2etest + +import ( + "context" + "time" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + e2ebehavior "github.com/oracle/oracle-database-operator/test/e2e/behavior" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" + // +kubebuilder:scaffold:imports +) + +var _ = Describe("test ACD binding", func() { + var acdLookupKey types.NamespacedName + var acdID string + + AfterEach(func() { + // IMPORTANT: The operator might have to call reconcile multiple times to finish an operation. + // If we do the update immediately, the previous reconciliation will overwrite the changes. 
+ By("Sleeping 20 seconds to wait for reconciliation to finish") + time.Sleep(time.Second * 20) + }) + + Describe("ACD Provisioning", func() { + It("Should create an AutonomousContainerDatabase resource and in OCI", func() { + provisionAcd := &dbv1alpha1.AutonomousContainerDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousContainerDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "provisionacd", + Namespace: ADBNamespace, + }, + Spec: dbv1alpha1.AutonomousContainerDatabaseSpec{ + DisplayName: common.String(e2eutil.GenerateACDName()), + CompartmentOCID: common.String(SharedCompartmentOCID), + AutonomousExadataVMClusterOCID: common.String(SharedExadataVMClusterOCID), + PatchModel: database.AutonomousContainerDatabasePatchModelUpdates, + OCIConfig: dbv1alpha1.OciConfigSpec{ + ConfigMapName: common.String(SharedOCIConfigMapName), + SecretName: common.String(SharedOCISecretName), + }, + }, + } + + acdLookupKey = types.NamespacedName{Name: provisionAcd.Name, Namespace: provisionAcd.Namespace} + + Expect(k8sClient.Create(context.TODO(), provisionAcd)).Should(Succeed()) + }) + + It("Should check ACD status is BACKUP IN PROGRESS", e2ebehavior.AssertACDState(&k8sClient, &dbClient, &acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateBackupInProgress, time.Minute*35)) + + It("Should check ACD status is AVAILABLE", e2ebehavior.AssertACDState(&k8sClient, &dbClient, &acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateAvailable, time.Minute*60)) + + It("Should save ACD ocid for next test", func() { + acd := &dbv1alpha1.AutonomousContainerDatabase{} + Expect(k8sClient.Get(context.TODO(), acdLookupKey, acd)).To(Succeed()) + acdID = *acd.Spec.AutonomousContainerDatabaseOCID + }) + + It("Should delete ACD local resource", e2ebehavior.AssertACDLocalDelete(&k8sClient, &dbClient, &acdLookupKey)) + }) + + Describe("ACD Binding", func() { + It("Should create an AutonomousContainerDatabase resource", 
func() { + acd := &dbv1alpha1.AutonomousContainerDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousContainerDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bindacd", + Namespace: ADBNamespace, + }, + Spec: dbv1alpha1.AutonomousContainerDatabaseSpec{ + AutonomousContainerDatabaseOCID: common.String(acdID), + OCIConfig: dbv1alpha1.OciConfigSpec{ + ConfigMapName: common.String(SharedOCIConfigMapName), + SecretName: common.String(SharedOCISecretName), + }, + }, + } + + acdLookupKey = types.NamespacedName{Name: acd.Name, Namespace: acd.Namespace} + + Expect(k8sClient.Create(context.TODO(), acd)).Should(Succeed()) + }) + + It("Should bind to an ACD", e2ebehavior.AssertACDBind(&k8sClient, &dbClient, &acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateAvailable)) + + It("Should update the ACD", e2ebehavior.UpdateAndAssertACDSpec(&k8sClient, &dbClient, &acdLookupKey)) + + It("Should restart the ACD", e2ebehavior.AssertACDRestart(&k8sClient, &dbClient, &acdLookupKey)) + + It("Should terminate the ACD", e2ebehavior.AssertACDTerminate(&k8sClient, &dbClient, &acdLookupKey)) + }) +}) diff --git a/test/e2e/autonomousdatabase_controller_bind_test.go b/test/e2e/autonomousdatabase_controller_bind_test.go index ff415da9..48e60f0d 100644 --- a/test/e2e/autonomousdatabase_controller_bind_test.go +++ b/test/e2e/autonomousdatabase_controller_bind_test.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -42,18 +42,16 @@ import ( "context" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" - "github.com/oracle/oci-go-sdk/v45/workrequests" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/test/e2e/behavior" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2ebehavior "github.com/oracle/oracle-database-operator/test/e2e/behavior" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" // +kubebuilder:scaffold:imports ) @@ -61,7 +59,6 @@ import ( // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. var _ = Describe("test ADB binding with hardLink=true", func() { - const bindingHardLinkTestFileName = "bind_adb_hardLink.yaml" var adbLookupKey types.NamespacedName const downloadedWallet = "instance-wallet-secret-1" var adbID *string @@ -91,10 +88,6 @@ var _ = Describe("test ADB binding with hardLink=true", func() { err = e2eutil.WaitUntilWorkCompleted(workClient, createResp.OpcWorkRequestId) Expect(err).ShouldNot(HaveOccurred()) - - // listResp, err := e2eutil.ListAutonomousDatabases(dbClient, &SharedCompartmentOCID, &dbName) - // Expect(err).ShouldNot(HaveOccurred()) - // fmt.Printf("List request DB %s is in %s state \n", *listResp.Items[0].DisplayName, listResp.Items[0].LifecycleState) }) Describe("ADB binding with HardLink = false using Wallet Password Secret", func() { @@ -110,16 +103,18 @@ var _ = Describe("test ADB binding with hardLink=true", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - AutonomousDatabaseOCID: adbID, - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ - K8sSecretName: 
common.String(SharedWalletPassSecretName), + Id: adbID, + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + K8sSecret: dbv1alpha1.K8sSecretSpec{ + Name: common.String(SharedWalletPassSecretName), }, }, }, HardLink: common.Bool(false), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -135,11 +130,21 @@ var _ = Describe("test ADB binding with hardLink=true", func() { It("Should download an instance wallet using the password from K8s Secret "+SharedWalletPassSecretName, e2ebehavior.AssertWallet(&k8sClient, &adbLookupKey)) - It("should update ADB", e2ebehavior.AssertUpdate(&k8sClient, &dbClient, &adbLookupKey)) + It("should update ADB", e2ebehavior.UpdateAndAssertDetails(&k8sClient, &dbClient, &adbLookupKey, SharedNewAdminPassSecretName, &SharedPlainTextNewAdminPassword, &SharedPlainTextWalletPassword)) + + It("Should stop ADB", e2ebehavior.UpdateAndAssertADBState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateStopped)) + + It("Should restart ADB", e2ebehavior.UpdateAndAssertADBState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)) + + It("Should change to RESTRICTED network access", e2ebehavior.TestNetworkAccessRestricted(&k8sClient, &dbClient, &adbLookupKey, []string{"192.168.0.1"}, false)) + + It("Should change isMTLSConnectionRequired to false", e2ebehavior.TestNetworkAccessRestricted(&k8sClient, &dbClient, &adbLookupKey, []string{"192.168.0.1"}, false)) - It("Should stop ADB", e2ebehavior.UpdateAndAssertState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateStopped)) + It("Should should change to PRIVATE network access", e2ebehavior.TestNetworkAccessPrivate(&k8sClient, &dbClient, &adbLookupKey, false, &SharedSubnetOCID, &SharedNsgOCID)) - It("Should restart ADB", 
e2ebehavior.UpdateAndAssertState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)) + It("Should change isMTLSConnectionRequired to true when network access is PRIVATE", e2ebehavior.TestNetworkAccessPrivate(&k8sClient, &dbClient, &adbLookupKey, true, &SharedSubnetOCID, &SharedNsgOCID)) + + It("Should return to PUBLIC access type", e2ebehavior.TestNetworkAccessPublic(&k8sClient, &dbClient, &adbLookupKey)) It("Should delete the resource in cluster but not terminate the database in OCI", e2ebehavior.AssertSoftLinkDelete(&k8sClient, &adbLookupKey)) }) @@ -157,16 +162,18 @@ var _ = Describe("test ADB binding with hardLink=true", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - AutonomousDatabaseOCID: adbID, - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ - OCISecretOCID: common.String(SharedInstanceWalletPasswordOCID), + Id: adbID, + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + OciSecret: dbv1alpha1.OciSecretSpec{ + Id: common.String(SharedInstanceWalletPasswordOCID), }, }, }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -182,12 +189,6 @@ var _ = Describe("test ADB binding with hardLink=true", func() { It("Should download an instance wallet using the password from OCI Secret OCID "+SharedInstanceWalletPasswordOCID, e2ebehavior.AssertWallet(&k8sClient, &adbLookupKey)) - It("should update ADB", e2ebehavior.AssertUpdate(&k8sClient, &dbClient, &adbLookupKey)) - - It("Should stop ADB", e2ebehavior.UpdateAndAssertState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateStopped)) - - It("Should restart ADB", e2ebehavior.UpdateAndAssertState(&k8sClient, 
&dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)) - It("Should delete the resource in cluster and terminate the database in OCI", e2ebehavior.AssertHardLinkDelete(&k8sClient, &dbClient, &adbLookupKey)) }) @@ -195,7 +196,7 @@ var _ = Describe("test ADB binding with hardLink=true", func() { Describe("bind to a terminated adb", func() { //Wait until remote state is terminated - It("Should check that OCI adb state is terminated", e2ebehavior.AssertRemoteStateOCID(&k8sClient, &dbClient, &terminatedAdbID, database.AutonomousDatabaseLifecycleStateTerminated)) + It("Should check that OCI adb state is terminated", e2ebehavior.AssertADBRemoteStateOCID(&k8sClient, &dbClient, &terminatedAdbID, database.AutonomousDatabaseLifecycleStateTerminated, time.Second*300)) It("Should create a AutonomousDatabase resource", func() { adb := &dbv1alpha1.AutonomousDatabase{ @@ -209,10 +210,10 @@ var _ = Describe("test ADB binding with hardLink=true", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - AutonomousDatabaseOCID: &terminatedAdbID, + Id: &terminatedAdbID, }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -224,75 +225,8 @@ var _ = Describe("test ADB binding with hardLink=true", func() { Expect(k8sClient.Create(context.TODO(), adb)).Should(Succeed()) }) - It("Should check for TERMINATED state in local resource", e2ebehavior.AssertLocalState(&k8sClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateTerminated)) + It("Should check for TERMINATED state in local resource", e2ebehavior.AssertADBLocalState(&k8sClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateTerminated)) It("Should delete local resource", e2ebehavior.AssertSoftLinkDelete(&k8sClient, &adbLookupKey)) }) - - // Describe("Test ADB status", func() { - // var 
dbName string - // var backupID string - - // It("Should init the test", func() { - // By("creating a temp ADB in OCI for binding test") - // dbName = e2eutil.GenerateDBName() - // createResp, err := e2eutil.CreateAutonomousDatabase(dbClient, &SharedCompartmentOCID, &dbName, &SharedPlainTextAdminPassword) - // Expect(err).ShouldNot(HaveOccurred()) - // Expect(createResp.AutonomousDatabase.Id).ShouldNot(BeNil()) - - // By("Save the database ID for later use") - // adbID = createResp.AutonomousDatabase.Id - // backupID = *adbID - - // By("Wait until the work request is in SUCCEEDED status") - // workClient, err := workrequests.NewWorkRequestClientWithConfigurationProvider(configProvider) - // Expect(err).ShouldNot(HaveOccurred()) - - // err = e2eutil.WaitUntilWorkCompleted(workClient, createResp.OpcWorkRequestId) - // Expect(err).ShouldNot(HaveOccurred()) - // }) - - // It("Should create a AutonomousDatabase resource", func() { - // adb := &dbv1alpha1.AutonomousDatabase{ - // TypeMeta: metav1.TypeMeta{ - // APIVersion: "database.oracle.com/v1alpha1", - // Kind: "AutonomousDatabase", - // }, - // ObjectMeta: metav1.ObjectMeta{ - // Name: "bindadb", - // Namespace: ADBNamespace, - // }, - // Spec: dbv1alpha1.AutonomousDatabaseSpec{ - // Details: dbv1alpha1.AutonomousDatabaseDetails{ - // AutonomousDatabaseOCID: adbID, - // }, - // HardLink: common.Bool(true), - // OCIConfig: dbv1alpha1.OCIConfigSpec{ - // ConfigMapName: common.String(SharedOCIConfigMapName), - // SecretName: common.String(SharedOCISecretName), - // }, - // }, - // } - - // adbLookupKey = types.NamespacedName{Name: adb.Name, Namespace: adb.Namespace} - - // Expect(k8sClient.Create(context.TODO(), adb)).Should(Succeed()) - // }) - - // It("should bind to an ADB", e2ebehavior.AssertBind(&k8sClient, &adbLookupKey)) - - // It("should terminate ADB in a different routine", func() { - // err := e2eutil.DeleteAutonomousDatabase(dbClient, adbID) - // Expect(err).ToNot(HaveOccurred()) - // // By("Wait until the 
work request is in SUCCEEDED status") - // // workClient, err := workrequests.NewWorkRequestClientWithConfigurationProvider(configProvider) - // // Expect(err).ShouldNot(HaveOccurred()) - // // err = e2eutil.WaitUntilWorkCompleted(workClient, createResp.OpcWorkRequestId) - // // Expect(err).ShouldNot(HaveOccurred()) - // }) - - // It("should check for terminated state in OCI", e2ebehavior.AssertRemoteStateOCID(&k8sClient, &dbClient, &backupID, database.AutonomousDatabaseLifecycleStateTerminated)) - - // It("should check for terminated state in local resource", e2ebehavior.AssertLocalState(&k8sClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateTerminated)) - // }) }) diff --git a/test/e2e/autonomousdatabase_controller_create_test.go b/test/e2e/autonomousdatabase_controller_create_test.go index 3ecd2804..cc7fd288 100644 --- a/test/e2e/autonomousdatabase_controller_create_test.go +++ b/test/e2e/autonomousdatabase_controller_create_test.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -42,16 +42,14 @@ import ( "context" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/test/e2e/behavior" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2ebehavior "github.com/oracle/oracle-database-operator/test/e2e/behavior" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" // +kubebuilder:scaffold:imports ) @@ -59,11 +57,6 @@ import ( // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
var _ = Describe("test ADB provisioning", func() { - const ( - changeStateTimeout = time.Second * 300 - changeStateInterval = time.Second * 10 - ) - AfterEach(func() { // IMPORTANT: The operator might have to call reconcile multiple times to finish an operation. // If we do the update immediately, the previous reconciliation will overwrite the changes. @@ -76,6 +69,8 @@ var _ = Describe("test ADB provisioning", func() { const downloadedWallet = "instance-wallet-secret-1" const resourceName = "createadb1" + const backupName = "adb-backup" + const restoreName = "adb-restore" duplicateAdbResourceName := "duplicateadb" var adbLookupKey = types.NamespacedName{Name: resourceName, Namespace: ADBNamespace} @@ -94,24 +89,30 @@ var _ = Describe("test ADB provisioning", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - CompartmentOCID: common.String(SharedCompartmentOCID), - DbName: common.String(dbName), - DisplayName: common.String(dbName), - CPUCoreCount: common.Int(1), - AdminPassword: dbv1alpha1.PasswordSpec{ - K8sSecretName: common.String(SharedAdminPassSecretName), + AutonomousDatabaseBase: dbv1alpha1.AutonomousDatabaseBase{ + CompartmentId: common.String(SharedCompartmentOCID), + DbName: common.String(dbName), + DisplayName: common.String(dbName), + CpuCoreCount: common.Int(1), + AdminPassword: dbv1alpha1.PasswordSpec{ + K8sSecret: dbv1alpha1.K8sSecretSpec{ + Name: common.String(SharedAdminPassSecretName), + }, + }, + DataStorageSizeInTBs: common.Int(1), + IsAutoScalingEnabled: common.Bool(true), }, - DataStorageSizeInTBs: common.Int(1), - IsAutoScalingEnabled: common.Bool(true), - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ - K8sSecretName: common.String(SharedWalletPassSecretName), + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + K8sSecret: dbv1alpha1.K8sSecretSpec{ + Name: 
common.String(SharedWalletPassSecretName), }, }, }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -135,18 +136,22 @@ var _ = Describe("test ADB provisioning", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - CompartmentOCID: common.String(SharedCompartmentOCID), - DbName: common.String(dbName), - DisplayName: common.String(dbName), - CPUCoreCount: common.Int(1), - AdminPassword: dbv1alpha1.PasswordSpec{ - K8sSecretName: common.String(SharedAdminPassSecretName), + AutonomousDatabaseBase: dbv1alpha1.AutonomousDatabaseBase{ + CompartmentId: common.String(SharedCompartmentOCID), + DbName: common.String(dbName), + DisplayName: common.String(dbName), + CpuCoreCount: common.Int(1), + AdminPassword: dbv1alpha1.PasswordSpec{ + K8sSecret: dbv1alpha1.K8sSecretSpec{ + Name: common.String(SharedAdminPassSecretName), + }, + }, + DataStorageSizeInTBs: common.Int(1), + IsAutoScalingEnabled: common.Bool(true), }, - DataStorageSizeInTBs: common.Int(1), - IsAutoScalingEnabled: common.Bool(true), }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -156,15 +161,98 @@ var _ = Describe("test ADB provisioning", func() { Expect(k8sClient.Create(context.TODO(), duplicateAdb)).To(Succeed()) }) - It("Should check for local resource state UNAVAILABLE", e2ebehavior.AssertLocalState(&k8sClient, &dupAdbLookupKey, database.AutonomousDatabaseLifecycleStateUnavailable)) + It("Should check for local resource state \"\"", e2ebehavior.AssertADBLocalState(&k8sClient, &dupAdbLookupKey, "")) - It("Should download an instance wallet using the password from K8s Secret "+SharedWalletPassSecretName, 
e2ebehavior.AssertWallet(&k8sClient, &adbLookupKey)) + It("Should cleanup the resource with duplicated db name", func() { + duplicateAdb := &dbv1alpha1.AutonomousDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: duplicateAdbResourceName, + Namespace: ADBNamespace, + }, + } + Expect(k8sClient.Delete(context.TODO(), duplicateAdb)).To(Succeed()) + }) + + It("Should create an Autonomous Database Backup", func() { + e2ebehavior.AssertADBState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)() - It("should update ADB", e2ebehavior.AssertUpdate(&k8sClient, &dbClient, &adbLookupKey)) + // Get adb ocid + adb := &dbv1alpha1.AutonomousDatabase{} + Expect(k8sClient.Get(context.TODO(), adbLookupKey, adb)).To(Succeed()) + databaseOCID := adb.Spec.Details.Id + tnsEntry := dbName + "_high" + err := e2ebehavior.ConfigureADBBackup(&dbClient, databaseOCID, &tnsEntry, &SharedPlainTextAdminPassword, &SharedPlainTextWalletPassword, &SharedBucketUrl, &SharedAuthToken, &SharedOciUser) + Expect(err).ShouldNot(HaveOccurred()) - It("Should stop ADB", e2ebehavior.UpdateAndAssertState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateStopped)) + adbBackup := &dbv1alpha1.AutonomousDatabaseBackup{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabaseBackup", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: backupName, + Namespace: ADBNamespace, + }, + Spec: dbv1alpha1.AutonomousDatabaseBackupSpec{ + Target: dbv1alpha1.TargetSpec{ + OciAdb: dbv1alpha1.OciAdbSpec{ + Ocid: common.String(*databaseOCID), + }, + }, + DisplayName: common.String(backupName), + OCIConfig: dbv1alpha1.OciConfigSpec{ + ConfigMapName: common.String(SharedOCIConfigMapName), + SecretName: common.String(SharedOCISecretName), + }, + }, + } - It("Should restart ADB", 
e2ebehavior.UpdateAndAssertState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)) + Expect(k8sClient.Create(context.TODO(), adbBackup)).To(Succeed()) + + backupLookupKey := types.NamespacedName{Name: backupName, Namespace: ADBNamespace} + e2ebehavior.AssertBackupRestore(&k8sClient, &dbClient, &backupLookupKey, &adbLookupKey, database.AutonomousDatabaseLifecycleStateBackupInProgress)() + }) + + It("Should restore a database", func() { + e2ebehavior.AssertADBState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)() + + adbRestore := &dbv1alpha1.AutonomousDatabaseRestore{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "database.oracle.com/v1alpha1", + Kind: "AutonomousDatabaseRestore", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: restoreName, + Namespace: ADBNamespace, + }, + Spec: dbv1alpha1.AutonomousDatabaseRestoreSpec{ + Target: dbv1alpha1.TargetSpec{ + K8sAdb: dbv1alpha1.K8sAdbSpec{ + Name: common.String(resourceName), + }, + }, + Source: dbv1alpha1.SourceSpec{ + K8sAdbBackup: dbv1alpha1.K8sAdbBackupSpec{ + Name: common.String(backupName), + }, + }, + OCIConfig: dbv1alpha1.OciConfigSpec{ + ConfigMapName: common.String(SharedOCIConfigMapName), + SecretName: common.String(SharedOCISecretName), + }, + }, + } + + Expect(k8sClient.Create(context.TODO(), adbRestore)).To(Succeed()) + restoreLookupKey := types.NamespacedName{Name: restoreName, Namespace: ADBNamespace} + e2ebehavior.AssertBackupRestore(&k8sClient, &dbClient, &restoreLookupKey, &adbLookupKey, database.AutonomousDatabaseLifecycleStateRestoreInProgress)() + }) + + It("Should download an instance wallet using the password from K8s Secret "+SharedWalletPassSecretName, e2ebehavior.AssertWallet(&k8sClient, &adbLookupKey)) It("Should delete the resource in cluster and terminate the database in OCI", e2ebehavior.AssertHardLinkDelete(&k8sClient, &dbClient, &adbLookupKey)) }) @@ -189,25 +277,30 @@ var _ = Describe("test ADB provisioning", 
func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - CompartmentOCID: common.String(SharedCompartmentOCID), - DbName: common.String(dbName), - DisplayName: common.String(dbName), - CPUCoreCount: common.Int(1), - AdminPassword: dbv1alpha1.PasswordSpec{ - OCISecretOCID: common.String(SharedAdminPasswordOCID), + AutonomousDatabaseBase: dbv1alpha1.AutonomousDatabaseBase{ + CompartmentId: common.String(SharedCompartmentOCID), + DbName: common.String(dbName), + DisplayName: common.String(dbName), + CpuCoreCount: common.Int(1), + AdminPassword: dbv1alpha1.PasswordSpec{ + OciSecret: dbv1alpha1.OciSecretSpec{ + Id: common.String(SharedAdminPasswordOCID), + }, + }, + DataStorageSizeInTBs: common.Int(1), + IsAutoScalingEnabled: common.Bool(true), }, - DataStorageSizeInTBs: common.Int(1), - IsAutoScalingEnabled: common.Bool(true), - - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ - OCISecretOCID: common.String(SharedInstanceWalletPasswordOCID), + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + OciSecret: dbv1alpha1.OciSecretSpec{ + Id: common.String(SharedInstanceWalletPasswordOCID), }, }, }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -221,12 +314,6 @@ var _ = Describe("test ADB provisioning", func() { It("Should download an instance wallet using the password from OCI Secret OCID "+SharedInstanceWalletPasswordOCID, e2ebehavior.AssertWallet(&k8sClient, &adbLookupKey)) - It("should update ADB", e2ebehavior.AssertUpdate(&k8sClient, &dbClient, &adbLookupKey)) - - It("Should stop ADB", e2ebehavior.UpdateAndAssertState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateStopped)) - - It("Should restart ADB", 
e2ebehavior.UpdateAndAssertState(&k8sClient, &dbClient, &adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)) - It("Should delete the resource in cluster and terminate the database in OCI", e2ebehavior.AssertHardLinkDelete(&k8sClient, &dbClient, &adbLookupKey)) }) }) diff --git a/test/e2e/backup.sql b/test/e2e/backup.sql new file mode 100644 index 00000000..3663e6ff --- /dev/null +++ b/test/e2e/backup.sql @@ -0,0 +1,20 @@ +set cloudconfig -proxy=&1 &2 +connect ADMIN/&3@&4 +ALTER DATABASE PROPERTY SET default_backup_bucket='&5'; + +BEGIN +DBMS_CLOUD.DROP_CREDENTIAL( credential_name => 'DEF_CRED_NAME' ); +END; +/ + +BEGIN + DBMS_CLOUD.CREATE_CREDENTIAL( + credential_name => 'DEF_CRED_NAME', + username => '&6', + password => '&7' +); +END; +/ + +ALTER DATABASE PROPERTY SET DEFAULT_CREDENTIAL = 'ADMIN.DEF_CRED_NAME'; +exit \ No newline at end of file diff --git a/test/e2e/behavior/shared_behaviors.go b/test/e2e/behavior/shared_behaviors.go index 501a93b2..3d87ce94 100644 --- a/test/e2e/behavior/shared_behaviors.go +++ b/test/e2e/behavior/shared_behaviors.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -45,17 +45,22 @@ import ( "reflect" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "os" + "os/exec" + "strings" + dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" ) /************************************************************** @@ -65,12 +70,32 @@ import ( * to the function, which is likely to be nil or zero value. **************************************************************/ +var ( + Describe = ginkgo.Describe + By = ginkgo.By + GinkgoWriter = ginkgo.GinkgoWriter + Expect = gomega.Expect + BeNil = gomega.BeNil + Eventually = gomega.Eventually + Equal = gomega.Equal + Succeed = gomega.Succeed + HaveOccurred = gomega.HaveOccurred + BeNumerically = gomega.BeNumerically + BeTrue = gomega.BeTrue + changeTimeout = time.Second * 300 + provisionTimeout = time.Second * 15 + bindTimeout = time.Second * 30 + backupTimeout = time.Minute * 20 + intervalTime = time.Second * 10 + updateADBTimeout = time.Minute * 7 + changeLocalStateTimeout = time.Second * 600 + updateACDTimeout = time.Minute * 3 +) + func AssertProvision(k8sClient *client.Client, adbLookupKey *types.NamespacedName) func() { return func() { - // Set the timeout to 15 minutes. The provision operation might take up to 10 minutes + // Set provisionTimeout to 15 minutes. The provision operation might take up to 10 minutes // if we have already send too many requests to OCI. 
- provisionTimeout := time.Minute * 15 - provisionInterval := time.Second * 10 Expect(k8sClient).NotTo(BeNil()) Expect(adbLookupKey).NotTo(BeNil()) @@ -86,18 +111,16 @@ func AssertProvision(k8sClient *client.Client, adbLookupKey *types.NamespacedNam return nil, err } - return createdADB.Spec.Details.AutonomousDatabaseOCID, nil - }, provisionTimeout, provisionInterval).ShouldNot(BeNil()) + return createdADB.Spec.Details.Id, nil + }, provisionTimeout, intervalTime).ShouldNot(BeNil()) fmt.Fprintf(GinkgoWriter, "AutonomousDatabase DbName = %s, and AutonomousDatabaseOCID = %s\n", - *createdADB.Spec.Details.DbName, *createdADB.Spec.Details.AutonomousDatabaseOCID) + *createdADB.Spec.Details.DbName, *createdADB.Spec.Details.Id) } } func AssertBind(k8sClient *client.Client, adbLookupKey *types.NamespacedName) func() { return func() { - bindTimeout := time.Second * 30 - Expect(k8sClient).NotTo(BeNil()) Expect(adbLookupKey).NotTo(BeNil()) @@ -116,13 +139,13 @@ func AssertBind(k8sClient *client.Client, adbLookupKey *types.NamespacedName) fu if err != nil { return false } - return (boundADB.Spec.Details.CompartmentOCID != nil && + return (boundADB.Spec.Details.CompartmentId != nil && boundADB.Spec.Details.DbWorkload != "" && boundADB.Spec.Details.DbName != nil) }, bindTimeout).Should(Equal(true), "Attributes in the resource should not be empty") fmt.Fprintf(GinkgoWriter, "AutonomousDatabase DbName = %s, and AutonomousDatabaseOCID = %s\n", - *boundADB.Spec.Details.DbName, *boundADB.Spec.Details.AutonomousDatabaseOCID) + *boundADB.Spec.Details.DbName, *boundADB.Spec.Details.Id) } } @@ -141,10 +164,10 @@ func AssertWallet(k8sClient *client.Client, adbLookupKey *types.NamespacedName) Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) // The default name is xxx-instance-wallet - if adb.Spec.Details.Wallet.Name == nil { + if adb.Spec.Wallet.Name == nil { walletName = adb.Name + "-instance-wallet" } else { - walletName = *adb.Spec.Details.Wallet.Name + 
walletName = *adb.Spec.Wallet.Name } By("Checking the wallet secret " + walletName + " is created and is not empty") @@ -160,31 +183,7 @@ func AssertWallet(k8sClient *client.Client, adbLookupKey *types.NamespacedName) } } -func CleanupDB(k8sClient *client.Client, dbClient *database.DatabaseClient, namespace string) func() { - return func() { - - Expect(k8sClient).NotTo(BeNil()) - Expect(dbClient).NotTo(BeNil()) - - derefK8sClient := *k8sClient - derefDBClient := *dbClient - - adbList := &dbv1alpha1.AutonomousDatabaseList{} - options := &client.ListOptions{ - Namespace: namespace, - } - derefK8sClient.List(context.TODO(), adbList, options) - - for _, adb := range adbList.Items { - if adb.Spec.Details.AutonomousDatabaseOCID != nil { - By("Terminating database " + *adb.Spec.Details.DbName) - Expect(e2eutil.DeleteAutonomousDatabase(derefDBClient, adb.Spec.Details.AutonomousDatabaseOCID)).Should(Succeed()) - } - } - } -} - -func compartInt(obj1 *int, obj2 *int) bool { +func compareInt(obj1 *int, obj2 *int) bool { if obj1 == nil && obj2 == nil { return true } @@ -194,7 +193,7 @@ func compartInt(obj1 *int, obj2 *int) bool { return *obj1 == *obj2 } -func compartBool(obj1 *bool, obj2 *bool) bool { +func compareBool(obj1 *bool, obj2 *bool) bool { if obj1 == nil && obj2 == nil { return true } @@ -204,7 +203,7 @@ func compartBool(obj1 *bool, obj2 *bool) bool { return *obj1 == *obj2 } -func compartString(obj1 *string, obj2 *string) bool { +func compareString(obj1 *string, obj2 *string) bool { if obj1 == nil && obj2 == nil { return true } @@ -214,7 +213,7 @@ func compartString(obj1 *string, obj2 *string) bool { return *obj1 == *obj2 } -func compartStringMap(obj1 map[string]string, obj2 map[string]string) bool { +func compareStringMap(obj1 map[string]string, obj2 map[string]string) bool { if len(obj1) != len(obj2) { return false } @@ -229,17 +228,16 @@ func compartStringMap(obj1 map[string]string, obj2 map[string]string) bool { return true } -// AssertUpdate changes the 
displayName from "foo" to "foo_new", and scale the cpuCoreCount to 2 -func AssertUpdate(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName) func() { - return func() { +// UpdateDetails updates spec.details from local resource and OCI +func UpdateDetails(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, newSecretName string, newAdminPassword *string) func() *dbv1alpha1.AutonomousDatabase { + return func() *dbv1alpha1.AutonomousDatabase { // Considering that there are at most two update requests will be sent during the update // From the observation per request takes ~3mins to finish - updateTimeout := time.Minute * 7 - updateInterval := time.Second * 20 Expect(k8sClient).NotTo(BeNil()) Expect(dbClient).NotTo(BeNil()) Expect(adbLookupKey).NotTo(BeNil()) + Expect(newAdminPassword).NotTo(BeNil()) derefK8sClient := *k8sClient derefDBClient := *dbClient @@ -252,7 +250,7 @@ func AssertUpdate(k8sClient *client.Client, dbClient *database.DatabaseClient, a // , the List request returns PROVISIONING state. In this case the update request will fail with // conflict state error. 
Eventually(func() (database.AutonomousDatabaseLifecycleStateEnum, error) { - listResp, err := e2eutil.ListAutonomousDatabases(derefDBClient, expectedADB.Spec.Details.CompartmentOCID, expectedADB.Spec.Details.DisplayName) + listResp, err := e2eutil.ListAutonomousDatabases(derefDBClient, expectedADB.Spec.Details.CompartmentId, expectedADB.Spec.Details.DisplayName) if err != nil { return "", err } @@ -262,87 +260,260 @@ func AssertUpdate(k8sClient *client.Client, dbClient *database.DatabaseClient, a } return database.AutonomousDatabaseLifecycleStateEnum(listResp.Items[0].LifecycleState), nil - }, updateTimeout, updateInterval).Should(Equal(database.AutonomousDatabaseLifecycleStateAvailable)) + }, updateADBTimeout, intervalTime).Should(Equal(database.AutonomousDatabaseLifecycleStateAvailable)) // Update var newDisplayName = *expectedADB.Spec.Details.DisplayName + "_new" - var newCPUCoreCount = 2 - By(fmt.Sprintf("Updating the ADB with newDisplayName = %s and newCPUCoreCount = %d\n", newDisplayName, newCPUCoreCount)) + var newCPUCoreCount int + if *expectedADB.Spec.Details.CpuCoreCount == 1 { + newCPUCoreCount = 2 + } else { + newCPUCoreCount = 1 + } + + var newKey = "testKey" + var newVal = "testVal" + + By(fmt.Sprintf("Updating the ADB with newDisplayName = %s, newCPUCoreCount = %d and newFreeformTag = %s:%s\n", + newDisplayName, newCPUCoreCount, newKey, newVal)) expectedADB.Spec.Details.DisplayName = common.String(newDisplayName) - expectedADB.Spec.Details.CPUCoreCount = common.Int(newCPUCoreCount) + expectedADB.Spec.Details.CpuCoreCount = common.Int(newCPUCoreCount) + expectedADB.Spec.Details.FreeformTags = map[string]string{newKey: newVal} + expectedADB.Spec.Details.AdminPassword.K8sSecret.Name = common.String(newSecretName) Expect(derefK8sClient.Update(context.TODO(), expectedADB)).To(Succeed()) - Eventually(func() (bool, error) { - // Get the current ADB resource and retry it's not in AVAILABLE state - currentADB := &dbv1alpha1.AutonomousDatabase{} - 
derefK8sClient.Get(context.TODO(), *adbLookupKey, currentADB) - if currentADB.Spec.Details.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable { - return false, nil - } + return expectedADB + } +} +// AssertADBDetails asserts the changes in spec.details +func AssertADBDetails(k8sClient *client.Client, + dbClient *database.DatabaseClient, + adbLookupKey *types.NamespacedName, + expectedADB *dbv1alpha1.AutonomousDatabase) func() { + return func() { + // Considering that there are at most two update requests will be sent during the update + // From the observation per request takes ~3mins to finish + + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(adbLookupKey).NotTo(BeNil()) + + derefDBClient := *dbClient + + expectedADBDetails := expectedADB.Spec.Details + Eventually(func() (bool, error) { // Fetch the ADB from OCI when it's in AVAILABLE state, and retry if its attributes doesn't match the new ADB's attributes - retryPolicy := e2eutil.NewLifecycleStateRetryPolicy(database.AutonomousDatabaseLifecycleStateAvailable) - resp, err := e2eutil.GetAutonomousDatabase(derefDBClient, currentADB.Spec.Details.AutonomousDatabaseOCID, &retryPolicy) + retryPolicy := e2eutil.NewLifecycleStateRetryPolicyADB(database.AutonomousDatabaseLifecycleStateAvailable) + resp, err := e2eutil.GetAutonomousDatabase(derefDBClient, expectedADB.Spec.Details.Id, &retryPolicy) if err != nil { return false, err } - adbDetails := currentADB.Spec.Details - - ociADB := currentADB - ociADB = currentADB.UpdateAttrFromOCIAutonomousDatabase(resp.AutonomousDatabase) - ociADBDetails := ociADB.Spec.Details - - // Compare - same := compartString(adbDetails.AutonomousDatabaseOCID, ociADBDetails.AutonomousDatabaseOCID) && - compartString(adbDetails.CompartmentOCID, ociADBDetails.CompartmentOCID) && - compartString(adbDetails.DisplayName, ociADBDetails.DisplayName) && - compartString(adbDetails.DbName, ociADBDetails.DbName) && - adbDetails.DbWorkload == 
ociADBDetails.DbWorkload && - compartBool(adbDetails.IsDedicated, ociADBDetails.IsDedicated) && - compartString(adbDetails.DbVersion, ociADBDetails.DbVersion) && - compartInt(adbDetails.DataStorageSizeInTBs, ociADBDetails.DataStorageSizeInTBs) && - compartInt(adbDetails.CPUCoreCount, ociADBDetails.CPUCoreCount) && - compartBool(adbDetails.IsAutoScalingEnabled, ociADBDetails.IsAutoScalingEnabled) && - adbDetails.LifecycleState == ociADBDetails.LifecycleState && - compartStringMap(adbDetails.FreeformTags, ociADBDetails.FreeformTags) && - compartString(adbDetails.SubnetOCID, ociADBDetails.SubnetOCID) && - reflect.DeepEqual(adbDetails.NsgOCIDs, ociADBDetails.NsgOCIDs) && - compartString(adbDetails.PrivateEndpoint, ociADBDetails.PrivateEndpoint) && - compartString(adbDetails.PrivateEndpointLabel, ociADBDetails.PrivateEndpointLabel) && - compartString(adbDetails.PrivateEndpointIP, ociADBDetails.PrivateEndpointIP) + debug := false + if debug { + if !compareString(expectedADBDetails.Id, resp.AutonomousDatabase.Id) { + fmt.Fprintf(GinkgoWriter, "Expected OCID: %v\nGot: %v\n", expectedADBDetails.Id, resp.AutonomousDatabase.Id) + } + if !compareString(expectedADBDetails.CompartmentId, resp.AutonomousDatabase.CompartmentId) { + fmt.Fprintf(GinkgoWriter, "Expected CompartmentOCID: %v\nGot: %v\n", expectedADBDetails.CompartmentId, resp.CompartmentId) + } + if !compareString(expectedADBDetails.DisplayName, resp.AutonomousDatabase.DisplayName) { + fmt.Fprintf(GinkgoWriter, "Expected DisplayName: %v\nGot: %v\n", expectedADBDetails.DisplayName, resp.AutonomousDatabase.DisplayName) + } + if !compareString(expectedADBDetails.DbName, resp.AutonomousDatabase.DbName) { + fmt.Fprintf(GinkgoWriter, "Expected DbName: %v\nGot:%v\n", expectedADBDetails.DbName, resp.AutonomousDatabase.DbName) + } + if expectedADBDetails.DbWorkload != resp.AutonomousDatabase.DbWorkload { + fmt.Fprintf(GinkgoWriter, "Expected DbWorkload: %v\nGot: %v\n", expectedADBDetails.DbWorkload, 
resp.AutonomousDatabase.DbWorkload) + } + if !compareBool(expectedADBDetails.IsDedicated, resp.AutonomousDatabase.IsDedicated) { + fmt.Fprintf(GinkgoWriter, "Expected IsDedicated: %v\nGot: %v\n", expectedADBDetails.IsDedicated, resp.AutonomousDatabase.IsDedicated) + } + if !compareString(expectedADBDetails.DbVersion, resp.AutonomousDatabase.DbVersion) { + fmt.Fprintf(GinkgoWriter, "Expected DbVersion: %v\nGot: %v\n", expectedADBDetails.DbVersion, resp.AutonomousDatabase.DbVersion) + } + if !compareInt(expectedADBDetails.DataStorageSizeInTBs, resp.AutonomousDatabase.DataStorageSizeInTBs) { + fmt.Fprintf(GinkgoWriter, "Expected DataStorageSize: %v\nGot: %v\n", expectedADBDetails.DataStorageSizeInTBs, resp.AutonomousDatabase.DataStorageSizeInTBs) + } + if !compareInt(expectedADBDetails.CpuCoreCount, resp.AutonomousDatabase.CpuCoreCount) { + fmt.Fprintf(GinkgoWriter, "Expected CPUCoreCount: %v\nGot: %v\n", expectedADBDetails.CpuCoreCount, resp.AutonomousDatabase.CpuCoreCount) + } + if !compareBool(expectedADBDetails.IsAutoScalingEnabled, resp.AutonomousDatabase.IsAutoScalingEnabled) { + fmt.Fprintf(GinkgoWriter, "Expected IsAutoScalingEnabled: %v\nGot: %v\n", expectedADBDetails.IsAutoScalingEnabled, resp.AutonomousDatabase.IsAutoScalingEnabled) + } + if !compareStringMap(expectedADBDetails.FreeformTags, resp.AutonomousDatabase.FreeformTags) { + fmt.Fprintf(GinkgoWriter, "Expected FreeformTags: %v\nGot: %v\n", expectedADBDetails.FreeformTags, resp.AutonomousDatabase.FreeformTags) + } + if !compareBool(expectedADBDetails.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) { + fmt.Fprintf(GinkgoWriter, "Expected IsAccessControlEnabled: %v\nGot: %v\n", expectedADBDetails.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) + } + if !reflect.DeepEqual(expectedADBDetails.WhitelistedIps, resp.AutonomousDatabase.WhitelistedIps) { + fmt.Fprintf(GinkgoWriter, "Expected AccessControlList: %v\nGot: %v\n", expectedADBDetails.WhitelistedIps, 
resp.AutonomousDatabase.WhitelistedIps) + } + if !compareBool(expectedADBDetails.IsMtlsConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) { + fmt.Fprintf(GinkgoWriter, "Expected IsMTLSConnectionRequired: %v\nGot: %v\n", expectedADBDetails.IsMtlsConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) + } + if !compareString(expectedADBDetails.SubnetId, resp.AutonomousDatabase.SubnetId) { + fmt.Fprintf(GinkgoWriter, "Expected SubnetOCID: %v\nGot: %v\n", expectedADBDetails.SubnetId, resp.AutonomousDatabase.SubnetId) + } + if !reflect.DeepEqual(expectedADBDetails.NsgIds, resp.AutonomousDatabase.NsgIds) { + fmt.Fprintf(GinkgoWriter, "Expected NsgOCIDs: %v\nGot: %v\n", expectedADBDetails.NsgIds, resp.AutonomousDatabase.NsgIds) + } + if !compareString(expectedADBDetails.PrivateEndpointLabel, resp.AutonomousDatabase.PrivateEndpointLabel) { + fmt.Fprintf(GinkgoWriter, "Expected PrivateEndpointLabel: %v\nGot: %v\n", expectedADBDetails.PrivateEndpointLabel, resp.AutonomousDatabase.PrivateEndpointLabel) + } + } + + // Compare the elements one by one rather than doing reflect.DeepEqual(adb1, adb2), since some parameters + // (e.g. adminPassword, wallet) are missing from e2eutil.GetAutonomousDatabase(). + // We don't compare LifecycleState in this case. We only make sure that the ADB is in AVAILABLE state before + proceeding to the next test.
+ same := compareString(expectedADBDetails.Id, resp.AutonomousDatabase.Id) && + compareString(expectedADBDetails.CompartmentId, resp.AutonomousDatabase.CompartmentId) && + compareString(expectedADBDetails.DisplayName, resp.AutonomousDatabase.DisplayName) && + compareString(expectedADBDetails.DbName, resp.AutonomousDatabase.DbName) && + expectedADBDetails.DbWorkload == resp.AutonomousDatabase.DbWorkload && + compareBool(expectedADBDetails.IsDedicated, resp.AutonomousDatabase.IsDedicated) && + compareString(expectedADBDetails.DbVersion, resp.AutonomousDatabase.DbVersion) && + compareInt(expectedADBDetails.DataStorageSizeInTBs, resp.AutonomousDatabase.DataStorageSizeInTBs) && + compareInt(expectedADBDetails.CpuCoreCount, resp.AutonomousDatabase.CpuCoreCount) && + compareBool(expectedADBDetails.IsAutoScalingEnabled, resp.AutonomousDatabase.IsAutoScalingEnabled) && + compareStringMap(expectedADBDetails.FreeformTags, resp.AutonomousDatabase.FreeformTags) && + compareBool(expectedADBDetails.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) && + reflect.DeepEqual(expectedADBDetails.WhitelistedIps, resp.AutonomousDatabase.WhitelistedIps) && + compareBool(expectedADBDetails.IsMtlsConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) && + compareString(expectedADBDetails.SubnetId, resp.AutonomousDatabase.SubnetId) && + reflect.DeepEqual(expectedADBDetails.NsgIds, resp.AutonomousDatabase.NsgIds) && + compareString(expectedADBDetails.PrivateEndpointLabel, resp.AutonomousDatabase.PrivateEndpointLabel) return same, nil - }, updateTimeout, updateInterval).Should(BeTrue()) + }, updateADBTimeout, intervalTime).Should(BeTrue()) + + // IMPORTANT: make sure the local resource has finished reconciling, otherwise the changes will + // conflict with the next test and cause unknown results.
+ AssertADBLocalState(k8sClient, adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)() + } +} + +func TestNetworkAccessRestricted(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, acl []string, isMTLSConnectionRequired bool) func() { + return func() { + TestNetworkAccess(k8sClient, dbClient, adbLookupKey, nil, nil, acl, isMTLSConnectionRequired)() + } +} + +/* Runs a script that connects to an ADB */ +func AssertAdminPassword(dbClient *database.DatabaseClient, databaseOCID *string, tnsEntry *string, adminPassword *string, walletPassword *string) error { + By("Downloading wallet zip") + walletZip, err := e2eutil.DownloadWalletZip(*dbClient, databaseOCID, walletPassword) + if err != nil { + fmt.Fprint(GinkgoWriter, err) + panic(err) + } + fmt.Fprint(GinkgoWriter, walletZip+" successfully downloaded.\n") + + By("Installing SQLcl") + if _, err := os.Stat("sqlcl-latest.zip"); errors.Is(err, os.ErrNotExist) { + cmd := exec.Command("wget", "https://download.oracle.com/otn_software/java/sqldeveloper/sqlcl-latest.zip") + _, err = cmd.Output() + Expect(err).To(BeNil()) + cmd = exec.Command("unzip", "sqlcl-latest.zip") + _, err = cmd.Output() + Expect(err).To(BeNil()) + } + + proxy := os.Getenv("HTTP_PROXY") + + By("Verify the adb connection") + cmd := exec.Command("./sqlcl/bin/sql", "/nolog", "@verify_connection.sql", proxy, walletZip, *adminPassword, strings.ToLower(*tnsEntry)) + stdout, err := cmd.Output() + + fmt.Fprint(GinkgoWriter, string(stdout)) + + return err +} + +func TestNetworkAccessPrivate(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, isMTLSConnectionRequired bool, subnetOCID *string, nsgOCIDs *string) func() { + return func() { + Expect(*subnetOCID).ToNot(Equal("")) + Expect(*nsgOCIDs).ToNot(Equal("")) + + adb := &dbv1alpha1.AutonomousDatabase{} + derefK8sClient := *k8sClient + Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, 
adb)).Should(Succeed()) + + TestNetworkAccess(k8sClient, dbClient, adbLookupKey, subnetOCID, nsgOCIDs, nil, isMTLSConnectionRequired)() + } +} + +func TestNetworkAccessPublic(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName) func() { + return func() { + TestNetworkAccess(k8sClient, dbClient, adbLookupKey, nil, nil, nil, true)() } } -// Updates adb state and then asserts if change is propagated to OCI -func UpdateAndAssertState(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { +func TestNetworkAccess(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, subnetOCID *string, nsgOCIDs *string, acl []string, isMTLSConnectionRequired bool) func() { + return func() { + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(adbLookupKey).NotTo(BeNil()) + + derefK8sClient := *k8sClient + + adb := &dbv1alpha1.AutonomousDatabase{} + AssertADBState(k8sClient, dbClient, adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)() + Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) + + adb.Spec.Details.SubnetId = subnetOCID + adb.Spec.Details.NsgIds = []string{*nsgOCIDs} + adb.Spec.Details.WhitelistedIps = acl + adb.Spec.Details.IsMtlsConnectionRequired = common.Bool(isMTLSConnectionRequired) + Expect(derefK8sClient.Update(context.TODO(), adb)).To(Succeed()) + AssertADBDetails(k8sClient, dbClient, adbLookupKey, adb)() + } +} + +// UpdateAndAssertDetails changes the below fields: +// displayName: "bar" -> "bar_new" +// adminPassword: "foo" -> "foo_new", +// cpuCoreCount: from 1 to 2, or from 2 to 1 +func UpdateAndAssertDetails(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, newSecretName string, newAdminPassword *string, walletPassword *string) func() { + return func() { + 
expectedADB := UpdateDetails(k8sClient, dbClient, adbLookupKey, newSecretName, newAdminPassword)() + AssertADBDetails(k8sClient, dbClient, adbLookupKey, expectedADB)() + + ocid := expectedADB.Spec.Details.Id + tnsEntry := *expectedADB.Spec.Details.DbName + "_high" + err := AssertAdminPassword(dbClient, ocid, &tnsEntry, newAdminPassword, walletPassword) + Expect(err).ShouldNot(HaveOccurred()) + } +} + +// UpdateAndAssertADBState updates adb state and then asserts if change is propagated to OCI +func UpdateAndAssertADBState(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { return func() { UpdateState(k8sClient, adbLookupKey, state)() - AssertState(k8sClient, dbClient, adbLookupKey, state)() + AssertADBState(k8sClient, dbClient, adbLookupKey, state)() } } -// Assert local and remote state -func AssertState(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { +// AssertADBState asserts local and remote state +func AssertADBState(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { return func() { // Waits longer for the local resource to reach the desired state - AssertLocalState(k8sClient, adbLookupKey, state)() + AssertADBLocalState(k8sClient, adbLookupKey, state)() // Double-check the state of the DB in OCI so the timeout can be shorter - AssertRemoteState(k8sClient, dbClient, adbLookupKey, state)() + AssertADBRemoteState(k8sClient, dbClient, adbLookupKey, state)() } } +// AssertHardLinkDelete asserts the database is terminated in OCI when hardLink is set to true func AssertHardLinkDelete(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName) func() { return func() { - changeStateTimeout := time.Second * 300 - 
Expect(k8sClient).NotTo(BeNil()) Expect(dbClient).NotTo(BeNil()) Expect(adbLookupKey).NotTo(BeNil()) @@ -354,22 +525,20 @@ func AssertHardLinkDelete(k8sClient *client.Client, dbClient *database.DatabaseC Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) Expect(derefK8sClient.Delete(context.TODO(), adb)).To(Succeed()) - AssertSoftLinkDelete(k8sClient, adbLookupKey)() - By("Checking if the ADB in OCI is in TERMINATING state") // Check every 10 secs for total 60 secs Eventually(func() (database.AutonomousDatabaseLifecycleStateEnum, error) { - retryPolicy := e2eutil.NewLifecycleStateRetryPolicy(database.AutonomousDatabaseLifecycleStateTerminating) - return returnRemoteState(derefK8sClient, derefDBClient, adb.Spec.Details.AutonomousDatabaseOCID, &retryPolicy) - }, changeStateTimeout).Should(Equal(database.AutonomousDatabaseLifecycleStateTerminating)) + retryPolicy := e2eutil.NewLifecycleStateRetryPolicyADB(database.AutonomousDatabaseLifecycleStateTerminating) + return returnADBRemoteState(derefK8sClient, derefDBClient, adb.Spec.Details.Id, &retryPolicy) + }, changeTimeout).Should(Equal(database.AutonomousDatabaseLifecycleStateTerminating)) + + AssertSoftLinkDelete(k8sClient, adbLookupKey)() } } +// AssertSoftLinkDelete asserts the database remains in OCI when hardLink is set to false func AssertSoftLinkDelete(k8sClient *client.Client, adbLookupKey *types.NamespacedName) func() { return func() { - changeStateTimeout := time.Second * 300 - changeStateInterval := time.Second * 10 - Expect(k8sClient).NotTo(BeNil()) Expect(adbLookupKey).NotTo(BeNil()) @@ -389,14 +558,13 @@ func AssertSoftLinkDelete(k8sClient *client.Client, adbLookupKey *types.Namespac return } return - }, changeStateTimeout, changeStateInterval).Should(Equal(true)) + }, changeTimeout, intervalTime).Should(Equal(true)) } } -func AssertLocalState(k8sClient *client.Client, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { +// 
AssertADBLocalState asserts the lifecycle state of the local resource using adbLookupKey +func AssertADBLocalState(k8sClient *client.Client, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { return func() { - changeLocalStateTimeout := time.Second * 600 - Expect(k8sClient).NotTo(BeNil()) Expect(adbLookupKey).NotTo(BeNil()) @@ -404,14 +572,13 @@ func AssertLocalState(k8sClient *client.Client, adbLookupKey *types.NamespacedNa By("Checking if the lifecycleState of local resource is " + string(state)) Eventually(func() (database.AutonomousDatabaseLifecycleStateEnum, error) { - return returnLocalState(derefK8sClient, *adbLookupKey) + return returnADBLocalState(derefK8sClient, *adbLookupKey) }, changeLocalStateTimeout).Should(Equal(state)) } } -func AssertRemoteState(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { +func AssertADBRemoteState(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { return func() { - Expect(k8sClient).NotTo(BeNil()) Expect(dbClient).NotTo(BeNil()) Expect(adbLookupKey).NotTo(BeNil()) @@ -420,34 +587,46 @@ func AssertRemoteState(k8sClient *client.Client, dbClient *database.DatabaseClie adb := &dbv1alpha1.AutonomousDatabase{} Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) - - AssertRemoteStateOCID(k8sClient, dbClient, adb.Spec.Details.AutonomousDatabaseOCID, state)() + By("Checking if the lifecycleState of remote resource is " + string(state)) + AssertADBRemoteStateOCID(k8sClient, dbClient, adb.Spec.Details.Id, state, changeTimeout)() } } -// Assert remote state using adb OCID -func AssertRemoteStateOCID(k8sClient *client.Client, dbClient *database.DatabaseClient, adbID *string, state database.AutonomousDatabaseLifecycleStateEnum) func() { +// Backup takes 
~15 minutes to complete, this function waits 20 minutes until ADB state is AVAILABLE +func AssertADBRemoteStateForBackupRestore(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { return func() { - changeRemoteStateTimeout := time.Second * 300 - changeRemoteStateInterval := time.Second * 10 + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(adbLookupKey).NotTo(BeNil()) + + derefK8sClient := *k8sClient + adb := &dbv1alpha1.AutonomousDatabase{} + Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) + By("Checking if the lifecycleState of remote resource is " + string(state)) + AssertADBRemoteStateOCID(k8sClient, dbClient, adb.Spec.Details.Id, state, backupTimeout)() + } +} + +func AssertADBRemoteStateOCID(k8sClient *client.Client, dbClient *database.DatabaseClient, adbID *string, state database.AutonomousDatabaseLifecycleStateEnum, timeout time.Duration) func() { + return func() { Expect(k8sClient).NotTo(BeNil()) Expect(dbClient).NotTo(BeNil()) Expect(adbID).NotTo(BeNil()) - fmt.Fprintf(GinkgoWriter, "ADB ID is %s", *adbID) + fmt.Fprintf(GinkgoWriter, "ADB ID is %s\n", *adbID) derefK8sClient := *k8sClient derefDBClient := *dbClient By("Checking if the lifecycleState of the ADB in OCI is " + string(state)) Eventually(func() (database.AutonomousDatabaseLifecycleStateEnum, error) { - return returnRemoteState(derefK8sClient, derefDBClient, adbID, nil) - }, changeRemoteStateTimeout, changeRemoteStateInterval).Should(Equal(state)) + return returnADBRemoteState(derefK8sClient, derefDBClient, adbID, nil) + }, timeout, intervalTime).Should(Equal(state)) } } -// Updates state from local resource and OCI +// UpdateState updates state from local resource and OCI func UpdateState(k8sClient *client.Client, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { return func() { 
Expect(k8sClient).NotTo(BeNil()) @@ -458,13 +637,20 @@ func UpdateState(k8sClient *client.Client, adbLookupKey *types.NamespacedName, s adb := &dbv1alpha1.AutonomousDatabase{} Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) - adb.Spec.Details.LifecycleState = state By("Updating adb state to " + string(state)) + switch state { + case database.AutonomousDatabaseLifecycleStateAvailable: + adb.Spec.Action = "Start" + case database.AutonomousDatabaseLifecycleStateStopped: + adb.Spec.Action = "Stop" + case database.AutonomousDatabaseLifecycleStateTerminated: + adb.Spec.Action = "Terminate" + } Expect(derefK8sClient.Update(context.TODO(), adb)).To(Succeed()) } } -func returnLocalState(k8sClient client.Client, adbLookupKey types.NamespacedName) (database.AutonomousDatabaseLifecycleStateEnum, error) { +func returnADBLocalState(k8sClient client.Client, adbLookupKey types.NamespacedName) (database.AutonomousDatabaseLifecycleStateEnum, error) { adb := &dbv1alpha1.AutonomousDatabase{} err := k8sClient.Get(context.TODO(), adbLookupKey, adb) if err != nil { @@ -473,10 +659,299 @@ func returnLocalState(k8sClient client.Client, adbLookupKey types.NamespacedName return adb.Status.LifecycleState, nil } -func returnRemoteState(k8sClient client.Client, dbClient database.DatabaseClient, adbID *string, retryPolicy *common.RetryPolicy) (database.AutonomousDatabaseLifecycleStateEnum, error) { +func returnADBRemoteState(k8sClient client.Client, dbClient database.DatabaseClient, adbID *string, retryPolicy *common.RetryPolicy) (database.AutonomousDatabaseLifecycleStateEnum, error) { resp, err := e2eutil.GetAutonomousDatabase(dbClient, adbID, retryPolicy) if err != nil { return "", err } return resp.LifecycleState, nil } + +func returnACDLocalState(k8sClient client.Client, acdLookupKey types.NamespacedName) (database.AutonomousContainerDatabaseLifecycleStateEnum, error) { + acd := &dbv1alpha1.AutonomousContainerDatabase{} + err := k8sClient.Get(context.TODO(), 
acdLookupKey, acd) + if err != nil { + return "", err + } + return acd.Status.LifecycleState, nil +} + +func returnACDRemoteState(k8sClient client.Client, dbClient database.DatabaseClient, acdID *string, retryPolicy *common.RetryPolicy) (database.AutonomousContainerDatabaseLifecycleStateEnum, error) { + resp, err := e2eutil.GetAutonomousContainerDatabase(dbClient, acdID, retryPolicy) + if err != nil { + return "", err + } + return resp.LifecycleState, nil +} + +/* Runs a script that connects to an ADB and configures the backup bucket */ +func ConfigureADBBackup(dbClient *database.DatabaseClient, databaseOCID *string, tnsEntry *string, adminPassword *string, walletPassword *string, bucket *string, authToken *string, ociUser *string) error { + + By("Downloading wallet zip") + walletZip, err := e2eutil.DownloadWalletZip(*dbClient, databaseOCID, walletPassword) + if err != nil { + fmt.Fprint(GinkgoWriter, err) + panic(err) + } + fmt.Fprint(GinkgoWriter, walletZip+" successfully downloaded.\n") + + By("Installing SQLcl") + if _, err := os.Stat("sqlcl-latest.zip"); errors.Is(err, os.ErrNotExist) { + cmd := exec.Command("wget", "https://download.oracle.com/otn_software/java/sqldeveloper/sqlcl-latest.zip") + _, err = cmd.Output() + Expect(err).To(BeNil()) + cmd = exec.Command("unzip", "sqlcl-latest.zip") + _, err = cmd.Output() + Expect(err).To(BeNil()) + } + + proxy := os.Getenv("HTTP_PROXY") + + By("Configuring adb backup bucket") + cmd := exec.Command("./sqlcl/bin/sql", "/nolog", "@backup.sql", proxy, walletZip, *adminPassword, strings.ToLower(*tnsEntry), *bucket, *ociUser, *authToken) + stdout, err := cmd.Output() + + fmt.Fprint(GinkgoWriter, string(stdout)) + + return err +} + +func AssertBackupRestore(k8sClient *client.Client, dbClient *database.DatabaseClient, backupRestoreLookupKey *types.NamespacedName, adbLookupKey *types.NamespacedName, state database.AutonomousDatabaseLifecycleStateEnum) func() { + return func() { + // After creating a backup, ADB status will 
change to BACKUP IN PROGRESS + // for ~7 minutes. After that time, the state should return to AVAILBLE + derefK8sClient := *k8sClient + + AssertADBRemoteState(k8sClient, dbClient, adbLookupKey, state)() + + By("Wait until ADB state returns to AVAILABLE") + AssertADBRemoteStateForBackupRestore(k8sClient, dbClient, adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)() + + if state == database.AutonomousDatabaseLifecycleStateBackupInProgress { + By("Checking adb backup State is ACTIVE") + createdBackup := &dbv1alpha1.AutonomousDatabaseBackup{} + Eventually(func() (database.AutonomousDatabaseBackupLifecycleStateEnum, error) { + derefK8sClient.Get(context.TODO(), *backupRestoreLookupKey, createdBackup) + return createdBackup.Status.LifecycleState, nil + }, backupTimeout, time.Second*20).Should(Equal(database.AutonomousDatabaseBackupLifecycleStateActive)) + } else { + By("Checking adb restore State is SUCCEEDED") + createdRestore := &dbv1alpha1.AutonomousDatabaseRestore{} + Eventually(func() (workrequests.WorkRequestStatusEnum, error) { + derefK8sClient.Get(context.TODO(), *backupRestoreLookupKey, createdRestore) + return createdRestore.Status.Status, nil + }, backupTimeout, time.Second*20).Should(Equal(workrequests.WorkRequestStatusSucceeded)) + } + } +} + +func AssertACDState(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName, state database.AutonomousContainerDatabaseLifecycleStateEnum, timeout time.Duration) func() { + return func() { + AssertACDLocalState(k8sClient, acdLookupKey, state, timeout)() + AssertACDRemoteState(k8sClient, dbClient, acdLookupKey, state, timeout)() + } +} + +func AssertACDLocalState(k8sClient *client.Client, acdLookupKey *types.NamespacedName, state database.AutonomousContainerDatabaseLifecycleStateEnum, timeout time.Duration) func() { + return func() { + Expect(k8sClient).NotTo(BeNil()) + Expect(acdLookupKey).NotTo(BeNil()) + + derefK8sClient := *k8sClient + + By("Checking if the 
lifecycleState of local resource is " + string(state)) + Eventually(func() (database.AutonomousContainerDatabaseLifecycleStateEnum, error) { + return returnACDLocalState(derefK8sClient, *acdLookupKey) + }, timeout).Should(Equal(state)) + } +} + +func AssertACDRemoteState(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName, state database.AutonomousContainerDatabaseLifecycleStateEnum, timeout time.Duration) func() { + return func() { + derefK8sClient := *k8sClient + + acd := &dbv1alpha1.AutonomousContainerDatabase{} + Expect(derefK8sClient.Get(context.TODO(), *acdLookupKey, acd)).To(Succeed()) + By("Checking if the lifecycleState of remote resource is " + string(state)) + AssertACDRemoteStateOCID(k8sClient, dbClient, acd.Spec.AutonomousContainerDatabaseOCID, state, timeout)() + } +} + +func AssertACDRemoteStateOCID(k8sClient *client.Client, dbClient *database.DatabaseClient, acdID *string, state database.AutonomousContainerDatabaseLifecycleStateEnum, timeout time.Duration) func() { + return func() { + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(acdID).NotTo(BeNil()) + + fmt.Fprintf(GinkgoWriter, "ACD ID is %s\n", *acdID) + + derefK8sClient := *k8sClient + derefDBClient := *dbClient + + By("Checking if the lifecycleState of the ACD in OCI is " + string(state)) + Eventually(func() (database.AutonomousContainerDatabaseLifecycleStateEnum, error) { + return returnACDRemoteState(derefK8sClient, derefDBClient, acdID, nil) + }, timeout, intervalTime).Should(Equal(state)) + } +} + +func AssertACDBind(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName, state database.AutonomousContainerDatabaseLifecycleStateEnum) func() { + return func() { + + // ACD state should be AVAILABLE + acdBindTimeout := time.Minute * 3 + + By("Wait until ACD is in state AVAILABLE") + AssertACDState(k8sClient, dbClient, acdLookupKey, state, acdBindTimeout)() + } +} + +func 
UpdateAndAssertACDSpec(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName) func() { + return func() { + expectedACD := UpdateACDSpec(k8sClient, dbClient, acdLookupKey)() + AssertACDSpec(k8sClient, dbClient, acdLookupKey, expectedACD)() + } +} + +func UpdateACDSpec(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName) func() *dbv1alpha1.AutonomousContainerDatabase { + return func() *dbv1alpha1.AutonomousContainerDatabase { + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + + derefK8sClient := *k8sClient + expectedAcd := &dbv1alpha1.AutonomousContainerDatabase{} + Expect(derefK8sClient.Get(context.TODO(), *acdLookupKey, expectedAcd)).To(Succeed()) + + expectedAcd.Spec.DisplayName = common.String(*expectedAcd.Spec.DisplayName + "_new") + + Expect(derefK8sClient.Update(context.TODO(), expectedAcd)).To(Succeed()) + return expectedAcd + } +} + +func AssertACDSpec(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName, expectedACD *dbv1alpha1.AutonomousContainerDatabase) func() { + return func() { + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(acdLookupKey).NotTo(BeNil()) + + derefDBClient := *dbClient + + expectedACDSpec := expectedACD.Spec + Eventually(func() (bool, error) { + // Fetch the ACD from OCI when it's in AVAILABLE state, and retry if its attributes doesn't match the new ACD's attributes + retryPolicy := e2eutil.NewLifecycleStateRetryPolicyACD(database.AutonomousContainerDatabaseLifecycleStateAvailable) + resp, err := e2eutil.GetAutonomousContainerDatabase(derefDBClient, expectedACD.Spec.AutonomousContainerDatabaseOCID, &retryPolicy) + if err != nil { + return false, err + } + + debug := true + if debug { + if !compareString(expectedACDSpec.AutonomousContainerDatabaseOCID, resp.AutonomousContainerDatabase.Id) { + fmt.Fprintf(GinkgoWriter, "Expected OCID: %v\nGot: %v\n", 
expectedACDSpec.AutonomousContainerDatabaseOCID, resp.AutonomousContainerDatabase.Id) + } + if !compareString(expectedACDSpec.CompartmentOCID, resp.AutonomousContainerDatabase.CompartmentId) { + fmt.Fprintf(GinkgoWriter, "Expected CompartmentOCID: %v\nGot: %v\n", expectedACDSpec.CompartmentOCID, resp.CompartmentId) + } + if !compareString(expectedACDSpec.DisplayName, resp.AutonomousContainerDatabase.DisplayName) { + fmt.Fprintf(GinkgoWriter, "Expected DisplayName: %v\nGot: %v\n", expectedACDSpec.DisplayName, resp.AutonomousContainerDatabase.DisplayName) + } + if !compareString(expectedACDSpec.AutonomousExadataVMClusterOCID, resp.AutonomousContainerDatabase.CloudAutonomousVmClusterId) { + fmt.Fprintf(GinkgoWriter, "Expected AutonomousExadataVMClusterOCID: %v\nGot: %v\n", expectedACDSpec.AutonomousExadataVMClusterOCID, resp.AutonomousContainerDatabase.CloudAutonomousVmClusterId) + } + if !compareStringMap(expectedACDSpec.FreeformTags, resp.AutonomousContainerDatabase.FreeformTags) { + fmt.Fprintf(GinkgoWriter, "Expected FreeformTags: %v\nGot: %v\n", expectedACDSpec.FreeformTags, resp.AutonomousContainerDatabase.FreeformTags) + } + if expectedACDSpec.PatchModel != resp.AutonomousContainerDatabase.PatchModel { + fmt.Fprintf(GinkgoWriter, "Expected PatchModel: %v\nGot: %v\n", expectedACDSpec.PatchModel, resp.AutonomousContainerDatabase.PatchModel) + } + } + + // Compare the elements one by one rather than doing reflect.DeelEqual(adb1, adb2), since some parameters + // (e.g. adminPassword, wallet) are missing from e2eutil.GetAutonomousDatabase(). + // We don't compare LifecycleState in this case. We only make sure that the ADB is in AVAIABLE state before + // proceeding to the next test. 
+ same := compareString(expectedACDSpec.AutonomousContainerDatabaseOCID, resp.AutonomousContainerDatabase.Id) && + compareString(expectedACDSpec.CompartmentOCID, resp.AutonomousContainerDatabase.CompartmentId) && + compareString(expectedACDSpec.DisplayName, resp.AutonomousContainerDatabase.DisplayName) && + compareString(expectedACDSpec.AutonomousExadataVMClusterOCID, resp.AutonomousContainerDatabase.CloudAutonomousVmClusterId) && + compareStringMap(expectedACDSpec.FreeformTags, resp.AutonomousContainerDatabase.FreeformTags) && + expectedACDSpec.PatchModel == resp.AutonomousContainerDatabase.PatchModel + + return same, nil + }, updateACDTimeout, intervalTime).Should(BeTrue()) + + // IMPORTANT: make sure the local resource has finished reconciling, otherwise the changes will + // be conflicted with the next test and cause unknow result. + AssertACDLocalState(k8sClient, acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateAvailable, time.Minute*2)() + } +} + +func AssertACDRestart(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName) func() { + return func() { + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(acdLookupKey).NotTo(BeNil()) + + derefK8sClient := *k8sClient + acd := &dbv1alpha1.AutonomousContainerDatabase{} + Expect(derefK8sClient.Get(context.TODO(), *acdLookupKey, acd)).To(Succeed()) + + acd.Spec.Action = dbv1alpha1.AcdActionRestart + + Expect(derefK8sClient.Update(context.TODO(), acd)) + + // Check ACD status is RESTARTING + AssertACDState(k8sClient, dbClient, acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateRestarting, time.Minute*2)() + // Wait until restart is completed + AssertACDState(k8sClient, dbClient, acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateAvailable, time.Minute*7)() + } +} + +func AssertACDTerminate(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName) func() { + return func() { + 
Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(acdLookupKey).NotTo(BeNil()) + + derefK8sClient := *k8sClient + acd := &dbv1alpha1.AutonomousContainerDatabase{} + Expect(derefK8sClient.Get(context.TODO(), *acdLookupKey, acd)) + + acd.Spec.Action = dbv1alpha1.AcdActionTerminate + Expect(derefK8sClient.Update(context.TODO(), acd)) + + // Check ACD status is TERMINATING + AssertACDState(k8sClient, dbClient, acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateTerminating, time.Minute*2)() + // Wait until status is TERMINATED + AssertACDState(k8sClient, dbClient, acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateTerminated, time.Minute*40)() + } +} + +func AssertACDLocalDelete(k8sClient *client.Client, dbClient *database.DatabaseClient, acdLookupKey *types.NamespacedName) func() { + return func() { + Expect(k8sClient).NotTo(BeNil()) + Expect(dbClient).NotTo(BeNil()) + Expect(acdLookupKey).NotTo(BeNil()) + + derefK8sClient := *k8sClient + existingAcd := &dbv1alpha1.AutonomousContainerDatabase{} + Expect(derefK8sClient.Get(context.TODO(), *acdLookupKey, existingAcd)).To(Succeed()) + Expect(derefK8sClient.Delete(context.TODO(), existingAcd)) + + By("Checking if the AutonomousContainerDatabase resource is deleted") + Eventually(func() (isDeleted bool) { + acd := &dbv1alpha1.AutonomousContainerDatabase{} + isDeleted = false + err := derefK8sClient.Get(context.TODO(), *acdLookupKey, acd) + if err != nil && k8sErrors.IsNotFound(err) { + isDeleted = true + return + } + return + }, changeTimeout, intervalTime).Should(Equal(true)) + + AssertACDRemoteState(k8sClient, dbClient, acdLookupKey, database.AutonomousContainerDatabaseLifecycleStateAvailable, time.Minute*2) + } +} diff --git a/test/e2e/resource/test_config.yaml b/test/e2e/resource/test_config.yaml index 4a5f8027..dc2768e2 100644 --- a/test/e2e/resource/test_config.yaml +++ b/test/e2e/resource/test_config.yaml @@ -1,5 +1,5 @@ # -# Copyright (c) 2021, Oracle and/or its 
affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # @@ -9,8 +9,20 @@ ociConfigFile: ~/.oci/config profile: DEFAULT # Compartment OCID where the database creates -compartmentOCID: ocid1.compartment.. +compartmentOCID: ocid1.compartment... # The OCID of the OCI Vault Secret that holds the password of the ADMIN account (should start with ocid1.vaultsecret...) adminPasswordOCID: ocid1.vaultsecret... # The OCID of the OCI Vault Secret that holds the password of the wallet (should start with ocid1.vaultsecret...) -instanceWalletPasswordOCID: ocid1.vaultsecret... \ No newline at end of file +instanceWalletPasswordOCID: ocid1.vaultsecret... +# The OCID of the subnet used to test the network access settings +subnetOCID: ocid1.subnet... +# The OCID of the network security group used to test the network access settings +nsgOCID: ocid1.networksecuritygroup... +# The URL of the bucket used for configure ADB on-demand backup +bucketURL: https://swiftobjectstorage.region.oraclecloud.com/v1/namespace-string/bucket_name +# The auth token generated in OCI Console > Profile > User Settings > Auth Token +authToken: token +# The OCI user used to login to OCI Console +ociUser: user +# The Autonomous Exadata VM Cluster used for AutonomousContainerDatabase provision +exadataVMClusterOCID: ocid1.autonomousexainfrastructure... \ No newline at end of file diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 3fd5cd2e..9d9914f7 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,29 +40,29 @@ package e2etest import ( "context" + "fmt" + "os" "path/filepath" "testing" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" controllers "github.com/oracle/oracle-database-operator/controllers/database" - "github.com/oracle/oracle-database-operator/test/e2e/behavior" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" // +kubebuilder:scaffold:imports ) @@ -77,6 +77,25 @@ This test suite runs the integration test which checks the following scenario 5. 
Test ADB binding with hardLink=true **/ +// To avoid dot import +var ( + BeforeSuite = ginkgo.BeforeSuite + AfterSuite = ginkgo.AfterSuite + Describe = ginkgo.Describe + PDescribe = ginkgo.PDescribe + FDescribe = ginkgo.FDescribe + AfterEach = ginkgo.AfterEach + By = ginkgo.By + It = ginkgo.It + FIt = ginkgo.FIt + PIt = ginkgo.PIt + Expect = gomega.Expect + Succeed = gomega.Succeed + HaveOccurred = gomega.HaveOccurred + BeNil = gomega.BeNil + Equal = gomega.Equal +) + var cfg *rest.Config var k8sClient client.Client var configProvider common.ConfigurationProvider @@ -89,26 +108,32 @@ const ADBNamespace string = "default" var SharedOCIConfigMapName = "oci-cred" var SharedOCISecretName = "oci-privatekey" var SharedPlainTextAdminPassword = "Welcome_1234" +var SharedPlainTextNewAdminPassword = "Welcome_1234_new" var SharedPlainTextWalletPassword = "Welcome_1234" var SharedCompartmentOCID string var SharedKeyOCID string var SharedAdminPasswordOCID string var SharedInstanceWalletPasswordOCID string +var SharedSubnetOCID string +var SharedNsgOCID string + +var SharedBucketUrl string +var SharedAuthToken string +var SharedOciUser string +var SharedExadataVMClusterOCID string const SharedAdminPassSecretName string = "adb-admin-password" -const SharedWalletPassSecretName = "adb-wallet-password" +const SharedNewAdminPassSecretName string = "new-adb-admin-password" +const SharedWalletPassSecretName string = "adb-wallet-password" func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Controller Suite") } -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.UseDevMode(true))) By("bootstrapping test environment") testEnv = &envtest.Environment{ @@ -141,11 
+166,36 @@ var _ = BeforeSuite(func(done Done) { KubeClient: k8sManager.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("AutonomousDatabase_test"), Scheme: k8sManager.GetScheme(), + Recorder: k8sManager.GetEventRecorderFor("AutonomousDatabase_test"), + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&controllers.AutonomousDatabaseBackupReconciler{ + KubeClient: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AutonomousDatabaseBakcup_test"), + Scheme: k8sManager.GetScheme(), + Recorder: k8sManager.GetEventRecorderFor("AutonomousDatabaseBakcup_test"), + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&controllers.AutonomousDatabaseRestoreReconciler{ + KubeClient: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AutonomousDatabaseRestore_test"), + Scheme: k8sManager.GetScheme(), + Recorder: k8sManager.GetEventRecorderFor("AutonomousDatabaseRestore_test"), + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&controllers.AutonomousContainerDatabaseReconciler{ + KubeClient: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AutonomousContainerDatabase_test"), + Scheme: k8sManager.GetScheme(), + Recorder: k8sManager.GetEventRecorderFor("AutonomousContainerDatabase_test"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) go func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() err = k8sManager.Start(ctrl.SetupSignalHandler()) Expect(err).ToNot(HaveOccurred(), "failed to run manager") gexec.KillAndWait(4 * time.Second) @@ -168,12 +218,24 @@ var _ = BeforeSuite(func(done Done) { SharedCompartmentOCID = testConfig.CompartmentOCID SharedAdminPasswordOCID = testConfig.AdminPasswordOCID SharedInstanceWalletPasswordOCID = testConfig.InstanceWalletPasswordOCID + SharedSubnetOCID = testConfig.SubnetOCID + SharedNsgOCID = testConfig.NsgOCID + SharedBucketUrl = testConfig.BucketURL + 
SharedAuthToken = testConfig.AuthToken + SharedOciUser = testConfig.OciUser + SharedExadataVMClusterOCID = testConfig.ExadataVMClusterOCID By("checking if the required parameters exist") Expect(testConfig.OCIConfigFile).ToNot(Equal("")) Expect(testConfig.CompartmentOCID).ToNot(Equal("")) Expect(testConfig.AdminPasswordOCID).ToNot(Equal("")) Expect(testConfig.InstanceWalletPasswordOCID).ToNot(Equal("")) + Expect(testConfig.SubnetOCID).ToNot(Equal("")) + Expect(testConfig.NsgOCID).ToNot(Equal("")) + Expect(testConfig.BucketURL).ToNot(Equal("")) + Expect(testConfig.AuthToken).ToNot(Equal("")) + Expect(testConfig.OciUser).ToNot(Equal("")) + Expect(testConfig.ExadataVMClusterOCID).ToNot(Equal("")) By("getting OCI provider") ociConfigUtil, err := e2eutil.GetOCIConfigUtil(testConfig.OCIConfigFile, testConfig.Profile) @@ -204,6 +266,15 @@ var _ = BeforeSuite(func(done Done) { Expect(k8sClient.Create(context.TODO(), adminSecret)).To(Succeed()) }) + By("Creating a k8s secret to hold new admin password", func() { + data := map[string]string{ + SharedNewAdminPassSecretName: SharedPlainTextNewAdminPassword, + } + newAdminSecret, err := e2eutil.CreateKubeSecret(ADBNamespace, SharedNewAdminPassSecretName, data) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient.Create(context.TODO(), newAdminSecret)).To(Succeed()) + }) + By("Creating a k8s secret to hold wallet password", func() { data := map[string]string{ SharedWalletPassSecretName: SharedPlainTextWalletPassword, @@ -212,9 +283,7 @@ var _ = BeforeSuite(func(done Done) { Expect(err).ToNot(HaveOccurred()) Expect(k8sClient.Create(context.TODO(), walletSecret)).To(Succeed()) }) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { /* @@ -231,6 +300,22 @@ var _ = AfterSuite(func() { Expect(err).ToNot(HaveOccurred()) */ - By("Deleting the resources that are created during the tests") - e2ebehavior.CleanupDB(&k8sClient, &dbClient, ADBNamespace) + By("Delete the resources that are created during the tests") + adbList := 
&databasev1alpha1.AutonomousDatabaseList{} + options := &client.ListOptions{ + Namespace: ADBNamespace, + } + k8sClient.List(context.TODO(), adbList, options) + By(fmt.Sprintf("Found %d AutonomousDatabase(s)", len(adbList.Items))) + + for _, adb := range adbList.Items { + if adb.Spec.Details.Id != nil { + By("Terminating database " + *adb.Spec.Details.DbName) + Expect(e2eutil.DeleteAutonomousDatabase(dbClient, adb.Spec.Details.Id)).Should(Succeed()) + } + } + + // Delete sqlcl-latest.zip and sqlcl folder if exists + os.Remove("sqlcl-latest.zip") + os.RemoveAll("sqlcl") }) diff --git a/test/e2e/util/oci_acd_request.go b/test/e2e/util/oci_acd_request.go new file mode 100644 index 00000000..f0a3a841 --- /dev/null +++ b/test/e2e/util/oci_acd_request.go @@ -0,0 +1,78 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other 
terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package e2eutil + +import ( + "context" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + // "io" + // "io/ioutil" + // "time" +) + +func CreateAutonomousContainerDatabase(dbClient database.DatabaseClient, compartmentId *string, acdName *string, exadataVmClusterID *string) (response database.CreateAutonomousContainerDatabaseResponse, err error) { + acdDetails := database.CreateAutonomousContainerDatabaseDetails{ + DisplayName: acdName, + CloudAutonomousVmClusterId: exadataVmClusterID, + CompartmentId: compartmentId, + PatchModel: database.CreateAutonomousContainerDatabaseDetailsPatchModelUpdates, + } + + createACDRequest := database.CreateAutonomousContainerDatabaseRequest{ + CreateAutonomousContainerDatabaseDetails: acdDetails, + } + + return dbClient.CreateAutonomousContainerDatabase(context.Background(), createACDRequest) +} + +func GetAutonomousContainerDatabase(dbClient database.DatabaseClient, acdOCID *string, retryPolicy *common.RetryPolicy) (database.GetAutonomousContainerDatabaseResponse, error) { + getRequest := database.GetAutonomousContainerDatabaseRequest{ + AutonomousContainerDatabaseId: acdOCID, + } + + if retryPolicy != nil { + getRequest.RequestMetadata = 
common.RequestMetadata{ + RetryPolicy: retryPolicy, + } + } + + return dbClient.GetAutonomousContainerDatabase(context.TODO(), getRequest) +} diff --git a/test/e2e/util/oci_config_util.go b/test/e2e/util/oci_config_util.go index 70ee1cab..e66e029b 100644 --- a/test/e2e/util/oci_config_util.go +++ b/test/e2e/util/oci_config_util.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -48,7 +48,7 @@ import ( "regexp" "strings" - "github.com/oracle/oci-go-sdk/v45/common" + "github.com/oracle/oci-go-sdk/v65/common" corev1 "k8s.io/api/core/v1" ) @@ -62,8 +62,6 @@ type configUtil struct { //ConfigFileInfo FileInfo *configFileInfo - - provider common.ConfigurationProvider } func (p configUtil) readAndParseConfigFile() (*configFileInfo, error) { @@ -125,17 +123,7 @@ func (p configUtil) CreateOCISecret(secretNamespace string, secretName string) ( } func (p configUtil) GetConfigProvider() (common.ConfigurationProvider, error) { - if p.provider != nil { - return p.provider, nil - } - - newProvider, err := common.ConfigurationProviderFromFileWithProfile(p.OCIConfigPath, p.Profile, "") - if err != nil { - return nil, nil - } - - p.provider = newProvider - return newProvider, nil + return common.ConfigurationProviderFromFileWithProfile(p.OCIConfigPath, p.Profile, "") } func openConfigFile(configFilePath string) (data []byte, err error) { @@ -193,7 +181,7 @@ func parseConfigFile(data []byte, profile string) (info *configFileInfo, err err //Look for profile for i, line := range splitContent { - if match := profileRegex.FindStringSubmatch(line); match != nil && len(match) > 1 && match[1] == profile { + if match := profileRegex.FindStringSubmatch(line); len(match) > 1 && match[1] == profile { start := i + 1 return parseConfigAtLine(start, splitContent) } diff --git a/test/e2e/util/oci_db_request.go b/test/e2e/util/oci_db_request.go index 
5fcd3aa1..09cb508a 100644 --- a/test/e2e/util/oci_db_request.go +++ b/test/e2e/util/oci_db_request.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -41,9 +41,10 @@ package e2eutil import ( "context" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/database" - + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "io" + "io/ioutil" "time" ) @@ -130,37 +131,54 @@ func generateRetryPolicy(retryFunc func(r common.OCIOperationResponse) bool) com return common.NewRetryPolicy(attempts, retryFunc, nextDuration) } -func NewDisplayNameRetryPolicy(name string) common.RetryPolicy { +func NewLifecycleStateRetryPolicyADB(lifecycleState database.AutonomousDatabaseLifecycleStateEnum) common.RetryPolicy { shouldRetry := func(r common.OCIOperationResponse) bool { if databaseResponse, ok := r.Response.(database.GetAutonomousDatabaseResponse); ok { // do the retry until lifecycle state reaches the passed terminal state - return databaseResponse.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable || - *databaseResponse.DisplayName != name + return databaseResponse.LifecycleState != lifecycleState } return true } return generateRetryPolicy(shouldRetry) } -func NewCPUCoreCountRetryPolicy(count int) common.RetryPolicy { +func NewLifecycleStateRetryPolicyACD(lifecycleState database.AutonomousContainerDatabaseLifecycleStateEnum) common.RetryPolicy { shouldRetry := func(r common.OCIOperationResponse) bool { - if databaseResponse, ok := r.Response.(database.GetAutonomousDatabaseResponse); ok { + if databaseResponse, ok := r.Response.(database.GetAutonomousContainerDatabaseResponse); ok { // do the retry until lifecycle state reaches the passed terminal state - return databaseResponse.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable || - 
*databaseResponse.CpuCoreCount != count + return databaseResponse.LifecycleState != lifecycleState } return true } return generateRetryPolicy(shouldRetry) } -func NewLifecycleStateRetryPolicy(lifecycleState database.AutonomousDatabaseLifecycleStateEnum) common.RetryPolicy { - shouldRetry := func(r common.OCIOperationResponse) bool { - if databaseResponse, ok := r.Response.(database.GetAutonomousDatabaseResponse); ok { - // do the retry until lifecycle state reaches the passed terminal state - return databaseResponse.LifecycleState != lifecycleState - } - return true +func DownloadWalletZip(dbClient database.DatabaseClient, databaseOCID *string, walletPassword *string) (string, error) { + + req := database.GenerateAutonomousDatabaseWalletRequest{ + AutonomousDatabaseId: common.String(*databaseOCID), + GenerateAutonomousDatabaseWalletDetails: database.GenerateAutonomousDatabaseWalletDetails{ + Password: common.String(*walletPassword), + }, } - return generateRetryPolicy(shouldRetry) + + resp, err := dbClient.GenerateAutonomousDatabaseWallet(context.TODO(), req) + if err != nil { + return "", err + } + + // Create a temp file wallet*.zip + const walletFileName = "wallet*.zip" + outZip, err := ioutil.TempFile("", walletFileName) + if err != nil { + return "", err + } + defer outZip.Close() + + // Save the wallet in wallet*.zip + if _, err := io.Copy(outZip, resp.Content); err != nil { + return "", err + } + + return outZip.Name(), nil } diff --git a/test/e2e/util/oci_vault_request.go b/test/e2e/util/oci_vault_request.go deleted file mode 100644 index ef7a793c..00000000 --- a/test/e2e/util/oci_vault_request.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -** Copyright (c) 2021 Oracle and/or its affiliates. 
-** -** The Universal Permissive License (UPL), Version 1.0 -** -** Subject to the condition set forth below, permission is hereby granted to any -** person obtaining a copy of this software, associated documentation and/or data -** (collectively the "Software"), free of charge and under any and all copyright -** rights in the Software, and any and all patent rights owned or freely -** licensable by each licensor hereunder covering either (i) the unmodified -** Software as contributed to or provided by such licensor, or (ii) the Larger -** Works (as defined below), to deal in both -** -** (a) the Software, and -** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if -** one is included with the Software (each a "Larger Work" to which the Software -** is contributed by such licensors), -** -** without restriction, including without limitation the rights to copy, create -** derivative works of, display, perform, and distribute the Software and make, -** use, sell, offer for sale, import, export, have made, and have sold the -** Software and the Larger Work(s), and to sublicense the foregoing rights on -** either these or other terms. -** -** This license is subject to the following condition: -** The above copyright notice and either this complete permission notice or at -** a minimum a reference to the UPL must be included in all copies or -** substantial portions of the Software. -** -** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -** SOFTWARE. 
- */ - -package e2eutil - -import ( - "context" - "encoding/base64" - "math" - "time" - - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/keymanagement" - "github.com/oracle/oci-go-sdk/v45/vault" -) - -func waitForVaultStatePolicy(state keymanagement.VaultLifecycleStateEnum) common.RetryPolicy { - shouldRetry := func(r common.OCIOperationResponse) bool { - if _, isServiceError := common.IsServiceError(r.Error); isServiceError { - // not service error, could be network error or other errors which prevents - // request send to server, will do retry here - return true - } - - if vaultResponse, ok := r.Response.(keymanagement.GetVaultResponse); ok { - // do the retry until lifecycle state reaches the passed terminal state - return vaultResponse.Vault.LifecycleState != state - } - - return true - } - - return newRetryPolicy(shouldRetry) -} - -func CreateOCIVault(kmsVaultClient keymanagement.KmsVaultClient, compartmentID *string, vaultName *string) (*keymanagement.Vault, error) { - vaultDetails := keymanagement.CreateVaultDetails{ - CompartmentId: compartmentID, - DisplayName: vaultName, - VaultType: keymanagement.CreateVaultDetailsVaultTypeDefault, - } - - request := keymanagement.CreateVaultRequest{ - CreateVaultDetails: vaultDetails, - } - response, err := kmsVaultClient.CreateVault(context.TODO(), request) - if err != nil { - return nil, err - } - - return &response.Vault, nil -} - -func CreateOCIKey(vaultManagementClient keymanagement.KmsManagementClient, compartmentID *string, keyName *string) (*keymanagement.Key, error) { - keyLength := 32 - - keyShape := keymanagement.KeyShape{ - Algorithm: keymanagement.KeyShapeAlgorithmAes, - Length: &keyLength, - } - - createKeyDetails := keymanagement.CreateKeyDetails{ - CompartmentId: compartmentID, - KeyShape: &keyShape, - DisplayName: keyName, - } - - request := keymanagement.CreateKeyRequest{ - CreateKeyDetails: createKeyDetails, - } - response, err := 
vaultManagementClient.CreateKey(context.TODO(), request) - if err != nil { - return nil, err - } - - return &response.Key, nil -} - -func CreateOCISecret(vaultClient vault.VaultsClient, compartmentID *string, secretName *string, vaultID *string, keyID *string, content *string) (*string, error) { - encoded := base64.StdEncoding.EncodeToString([]byte(*content)) - - base64Content := vault.Base64SecretContentDetails{ - Name: secretName, - Content: common.String(encoded), - } - - details := vault.CreateSecretDetails{ - CompartmentId: compartmentID, - SecretContent: base64Content, - SecretName: secretName, - VaultId: vaultID, - KeyId: keyID, - } - - request := vault.CreateSecretRequest{ - CreateSecretDetails: details, - } - - // Send the request using the service client - response, err := vaultClient.CreateSecret(context.TODO(), request) - if err != nil { - return nil, err - } - - return response.Secret.Id, nil -} - -func getVault(ctx context.Context, client keymanagement.KmsVaultClient, retryPolicy *common.RetryPolicy, vaultID *string) error { - request := keymanagement.GetVaultRequest{ - VaultId: vaultID, - RequestMetadata: common.RequestMetadata{ - RetryPolicy: retryPolicy, - }, - } - if _, err := client.GetVault(ctx, request); err != nil { - return err - } - return nil -} - -func getKey(client keymanagement.KmsManagementClient, retryPolicy *common.RetryPolicy, keyID *string) error { - request := keymanagement.GetKeyRequest{ - KeyId: keyID, - RequestMetadata: common.RequestMetadata{ - RetryPolicy: retryPolicy, - }, - } - if _, err := client.GetKey(context.TODO(), request); err != nil { - return err - } - return nil -} - -func getSecret(client vault.VaultsClient, retryPolicy *common.RetryPolicy, secretID *string) error { - request := vault.GetSecretRequest{ - SecretId: secretID, - RequestMetadata: common.RequestMetadata{ - RetryPolicy: retryPolicy, - }, - } - if _, err := client.GetSecret(context.TODO(), request); err != nil { - return err - } - return nil -} - -func 
newRetryPolicy(retryOperation func(common.OCIOperationResponse) bool) common.RetryPolicy { - // maximum times of retry - attempts := uint(10) - - nextDuration := func(r common.OCIOperationResponse) time.Duration { - // you might want wait longer for next retry when your previous one failed - // this function will return the duration as: - // 1s, 2s, 4s, 8s, 16s, 32s, 64s etc... - return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second - } - - return common.NewRetryPolicy(attempts, retryOperation, nextDuration) -} - -func WaitForVaultState(client keymanagement.KmsVaultClient, vaultID *string, state keymanagement.VaultLifecycleStateEnum) error { - shouldRetry := func(r common.OCIOperationResponse) bool { - if vaultResponse, ok := r.Response.(keymanagement.GetVaultResponse); ok { - // do the retry until lifecycle state reaches the passed terminal state - return vaultResponse.Vault.LifecycleState != state - } - - return true - } - - lifecycleStateCheckRetryPolicy := newRetryPolicy(shouldRetry) - - return getVault(context.TODO(), client, &lifecycleStateCheckRetryPolicy, vaultID) -} - -func WaitForKeyState(client keymanagement.KmsManagementClient, keyID *string, state keymanagement.KeyLifecycleStateEnum) error { - shouldRetry := func(r common.OCIOperationResponse) bool { - if keyResponse, ok := r.Response.(keymanagement.GetKeyResponse); ok { - // do the retry until lifecycle state reaches the passed terminal state - return keyResponse.Key.LifecycleState != state - } - - return true - } - - lifecycleStateCheckRetryPolicy := newRetryPolicy(shouldRetry) - - return getKey(client, &lifecycleStateCheckRetryPolicy, keyID) -} - -func WaitForSecretState(client vault.VaultsClient, secretID *string, state vault.SecretLifecycleStateEnum) error { - shouldRetry := func(r common.OCIOperationResponse) bool { - if secretResponse, ok := r.Response.(vault.GetSecretResponse); ok { - // do the retry until lifecycle state reaches the passed terminal state - return 
secretResponse.Secret.LifecycleState != state - } - - return true - } - - lifecycleStateCheckRetryPolicy := newRetryPolicy(shouldRetry) - - return getSecret(client, &lifecycleStateCheckRetryPolicy, secretID) -} - -// CleanupVault deletes the vault -// Anything encrypted by the keys contained within this vault will be unusable or irretrievable after the vault has been deleted -func CleanupVault(kmsVaultClient keymanagement.KmsVaultClient, vaultID *string) error { - if vaultID == nil { - return nil - } - - request := keymanagement.ScheduleVaultDeletionRequest{ - VaultId: vaultID, - } - if _, err := kmsVaultClient.ScheduleVaultDeletion(context.TODO(), request); err != nil { - return err - } - return nil -} diff --git a/test/e2e/util/oci_work_request.go b/test/e2e/util/oci_work_request.go index 4721fa0a..9751ef63 100644 --- a/test/e2e/util/oci_work_request.go +++ b/test/e2e/util/oci_work_request.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -41,8 +41,8 @@ package e2eutil import ( "context" - "github.com/oracle/oci-go-sdk/v45/common" - "github.com/oracle/oci-go-sdk/v45/workrequests" + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/workrequests" "time" ) diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go index 885985f1..b9e76aa7 100644 --- a/test/e2e/util/util.go +++ b/test/e2e/util/util.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2021 Oracle and/or its affiliates. +** Copyright (c) 2022 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -44,7 +44,7 @@ import ( "strings" "time" - goyaml "gopkg.in/yaml.v2" + goyaml "gopkg.in/yaml.v3" dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" @@ -56,12 +56,20 @@ import ( // GenerateDBName returns a string DB concatenate 14 digits of the date time // E.g., DB060102150405 if the curret date-time is 2006.01.02 15:04:05 func GenerateDBName() string { + return "DB" + getDateTimeString() +} + +func GenerateACDName() string { + return "ACD" + getDateTimeString() +} + +func getDateTimeString() string { timeString := time.Now().Format("2006.01.02 15:04:05") trimmed := strings.ReplaceAll(timeString, ":", "") // remove colons trimmed = strings.ReplaceAll(trimmed, ".", "") // remove dots trimmed = strings.ReplaceAll(trimmed, " ", "") // remove spaces trimmed = trimmed[2:] // remove the first two digits of year (2006 -> 06) - return "DB" + trimmed + return trimmed } func unmarshalFromYamlBytes(bytes []byte, obj interface{}) error { @@ -73,7 +81,7 @@ func unmarshalFromYamlBytes(bytes []byte, obj interface{}) error { return json.Unmarshal(jsonBytes, obj) } -// LoadTestFixture create an AutonomousDatabase resoursce from a test fixture +// LoadTestFixture create an AutonomousDatabase resource from a test fixture func LoadTestFixture(adb *dbv1alpha1.AutonomousDatabase, filename string) (*dbv1alpha1.AutonomousDatabase, error) { filePath := "./resource/" + filename yamlBytes, err := ioutil.ReadFile(filePath) @@ -119,6 +127,12 @@ type testConfiguration struct { CompartmentOCID string `yaml:"compartmentOCID"` AdminPasswordOCID string `yaml:"adminPasswordOCID"` InstanceWalletPasswordOCID string `yaml:"instanceWalletPasswordOCID"` + SubnetOCID string `yaml:"subnetOCID"` + NsgOCID string `yaml:"nsgOCID"` + BucketURL string `yaml:"bucketURL"` + AuthToken string `yaml:"authToken"` + OciUser string `yaml:"ociUser"` + ExadataVMClusterOCID string `yaml:"exadataVMClusterOCID"` } func 
GetTestConfig(filename string) (*testConfiguration, error) { diff --git a/test/e2e/verify_connection.sql b/test/e2e/verify_connection.sql new file mode 100644 index 00000000..85ded8ad --- /dev/null +++ b/test/e2e/verify_connection.sql @@ -0,0 +1,4 @@ +set cloudconfig -proxy=&1 &2 +connect ADMIN/&3@&4 +select 1 from dual; +exit \ No newline at end of file